Implemented VMX APIC Registers Virtualization and VMX Virtual Interrupt Delivery emulation
Bugfix: VMX: VMEntry should perform TPR virtualization (the TPR Shadow + APIC Access Virtualization case is affected) and could even cause a TPR Threshold VMEXIT
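In short: VMLAUNCH/VMRESUME now run the same TPR-virtualization step that a CR8 write already triggers, so a guest entered with VTPR[7:4] below the VMCS TPR threshold takes a trap-like TPR Threshold VMEXIT right after VMEntry. A minimal standalone sketch of the threshold check (hypothetical helper name; the committed logic is BX_CPU_C::VMX_TPR_Virtualization in the diff below):

    #include <cstdint>

    // Returns true when VMEntry must raise the trap-like TPR-threshold VMEXIT:
    // the TPR shadow is bits 7:4 of the virtual TPR kept on the virtual-APIC page.
    bool tpr_threshold_vmexit_needed(uint8_t vtpr, uint32_t tpr_threshold)
    {
        uint8_t tpr_shadow = vtpr >> 4;      // VTPR[7:4]
        return tpr_shadow < tpr_threshold;   // below threshold => VMEXIT
    }

This check only applies when the "use TPR shadow" control is set; with virtual-interrupt delivery enabled the same entry point instead performs PPR virtualization followed by evaluation of pending virtual interrupts.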
This commit is contained in:
parent
6276173b85
commit
744001e35e
@@ -512,6 +512,9 @@ typedef Bit32u bx_phy_address;
 // on any system.
 typedef Bit32u bx_bool;
 
+#define BX_TRUE (1)
+#define BX_FALSE (0)
+
 #if BX_WITH_MACOS
 # define bx_ptr_t char *
 #else
@@ -1101,16 +1101,18 @@ public: // for now...
 #define BX_EVENT_NMI (1 << 0)
 #define BX_EVENT_SMI (1 << 1)
 #define BX_EVENT_INIT (1 << 2)
 #define BX_EVENT_CODE_BREAKPOINT_ASSIST (1 << 3)
 #define BX_EVENT_VMX_MONITOR_TRAP_FLAG (1 << 4)
 #define BX_EVENT_VMX_PREEMPTION_TIMER_EXPIRED (1 << 5)
 #define BX_EVENT_VMX_INTERRUPT_WINDOW_EXITING (1 << 6)
 #define BX_EVENT_VMX_NMI_WINDOW_EXITING (1 << 7)
 #define BX_EVENT_SVM_VIRQ_PENDING (1 << 8)
 #define BX_EVENT_PENDING_VMX_VIRTUAL_INTR (1 << 9)
 #define BX_EVENT_PENDING_INTR (1 << 10)
 #define BX_EVENT_PENDING_LAPIC_INTR (1 << 11)
 #define BX_EVENT_VMX_VTPR_UPDATE (1 << 12)
+#define BX_EVENT_VMX_VEOI_UPDATE (1 << 13)
+#define BX_EVENT_VMX_VIRTUAL_APIC_WRITE (1 << 14)
 Bit32u pending_event;
 Bit32u event_mask;
 Bit32u async_event;
@@ -4332,12 +4334,24 @@ public: // for now...
 #endif
 #if BX_SUPPORT_X86_64
   BX_SMF bx_bool is_virtual_apic_page(bx_phy_address paddr) BX_CPP_AttrRegparmN(1);
-  BX_SMF void VMX_Virtual_Apic_Read(bx_phy_address paddr, unsigned len, void *data);
+  BX_SMF bx_bool virtual_apic_access_vmexit(unsigned offset, unsigned len) BX_CPP_AttrRegparmN(2);
+  BX_SMF bx_phy_address VMX_Virtual_Apic_Read(bx_phy_address paddr, unsigned len, void *data);
   BX_SMF void VMX_Virtual_Apic_Write(bx_phy_address paddr, unsigned len, void *data);
-  BX_SMF void VMX_Write_VTPR(Bit8u vtpr);
-  BX_SMF void VMX_TPR_Virtualization(void);
+  BX_SMF void VMX_Write_VICR(void);
   BX_SMF Bit32u VMX_Read_Virtual_APIC(unsigned offset);
   BX_SMF void VMX_Write_Virtual_APIC(unsigned offset, Bit32u val32);
+  BX_SMF void VMX_TPR_Virtualization(void);
+  BX_SMF bx_bool Virtualize_X2APIC_Write(unsigned msr, Bit64u val_64);
+  BX_SMF void VMX_Virtual_Apic_Access_Trap(void);
+#if BX_SUPPORT_VMX >= 2
+  BX_SMF void vapic_set_vector(unsigned apic_arrbase, Bit8u vector);
+  BX_SMF Bit8u vapic_clear_and_find_highest_priority_int(unsigned apic_arrbase, Bit8u vector);
+  BX_SMF void VMX_PPR_Virtualization(void);
+  BX_SMF void VMX_EOI_Virtualization(void);
+  BX_SMF void VMX_Self_IPI_Virtualization(Bit8u vector);
+  BX_SMF void VMX_Evaluate_Pending_Virtual_Interrupts(void);
+  BX_SMF void VMX_Deliver_Virtual_Interrupt(void);
+#endif
 #endif
   // vmexit reasons
   BX_SMF void VMexit_Instruction(bxInstruction_c *i, Bit32u reason, bx_bool rw = BX_READ) BX_CPP_AttrRegparmN(3);
@@ -162,8 +162,7 @@ typedef bx_cpuid_t* (*bx_create_cpuid_method)(BX_CPU_C *cpu);
 #define BX_VMX_PAUSE_LOOP_EXITING (1 << 15) /* Pause Loop Exiting */
 #define BX_VMX_EPTP_SWITCHING (1 << 16) /* EPTP switching (VM Function 0) */
 #define BX_VMX_EPT_ACCESS_DIRTY (1 << 17) /* Extended Page Tables (EPT) A/D Bits */
-#define BX_VMX_VIRTUAL_APIC_REGS (1 << 18) /* APIC Registers Virtualization */
-#define BX_VMX_VIRTUAL_INTERRUPTS (1 << 19) /* Virtual Interrupt Delivery */
+#define BX_VMX_VINTR_DELIVERY (1 << 18) /* Virtual Interrupt Delivery */
 
 // CPUID defines - STD features CPUID[0x00000001].EDX
 // ----------------------------
@@ -1322,7 +1322,8 @@ void BX_CPU_C::WriteCR8(bxInstruction_c *i, bx_address val)
 
 #if BX_SUPPORT_VMX && BX_SUPPORT_X86_64
   if (BX_CPU_THIS_PTR in_vmx_guest && VMEXIT(VMX_VM_EXEC_CTRL2_TPR_SHADOW)) {
-    VMX_Write_VTPR(tpr);
+    VMX_Write_Virtual_APIC(BX_LAPIC_TPR, tpr);
+    VMX_TPR_Virtualization();
     return;
   }
 #endif
@@ -36,6 +36,8 @@ bx_bool BX_CPU_C::handleWaitForEvent(void)
   if ((is_pending(BX_EVENT_PENDING_INTR | BX_EVENT_PENDING_LAPIC_INTR) && (BX_CPU_THIS_PTR get_IF() || BX_CPU_THIS_PTR activity_state == BX_ACTIVITY_STATE_MWAIT_IF)) ||
       is_pending(BX_EVENT_NMI | BX_EVENT_SMI | BX_EVENT_INIT |
                  BX_EVENT_VMX_VTPR_UPDATE |
+                 BX_EVENT_VMX_VEOI_UPDATE |
+                 BX_EVENT_VMX_VIRTUAL_APIC_WRITE |
                  BX_EVENT_VMX_MONITOR_TRAP_FLAG |
                  BX_EVENT_VMX_PREEMPTION_TIMER_EXPIRED |
                  BX_EVENT_VMX_NMI_WINDOW_EXITING))
@@ -102,7 +104,17 @@ void BX_CPU_C::InterruptAcknowledge(void)
 #endif
 
 #if BX_SUPPORT_VMX
-  VMexit_ExtInterrupt();
+  if (BX_CPU_THIS_PTR in_vmx_guest) {
+
+#if BX_SUPPORT_VMX >= 2
+    if (is_pending(BX_EVENT_PENDING_VMX_VIRTUAL_INTR)) {
+      VMX_Deliver_Virtual_Interrupt();
+      return;
+    }
+#endif
+
+    VMexit_ExtInterrupt();
+  }
 #endif
 
   // NOTE: similar code in ::take_irq()
@@ -174,11 +186,13 @@ bx_bool BX_CPU_C::handleAsyncEvent(void)
     BX_CPU_THIS_PTR debug_trap &= BX_DEBUG_SINGLE_STEP_BIT;
 #endif
 
-  // TPR shadow takes priority over SMI, INIT and lower priority events and
+  // APIC virtualization trap take priority over SMI, INIT and lower priority events and
   // not blocked by EFLAGS.IF or interrupt inhibits by MOV_SS and STI
-#if BX_SUPPORT_X86_64 && BX_SUPPORT_VMX
-  if (is_unmasked_event_pending(BX_EVENT_VMX_VTPR_UPDATE)) {
-    VMX_TPR_Virtualization();
+#if BX_SUPPORT_VMX && BX_SUPPORT_X86_64
+  if (is_unmasked_event_pending(BX_EVENT_VMX_VTPR_UPDATE |
+        BX_EVENT_VMX_VEOI_UPDATE | BX_EVENT_VMX_VIRTUAL_APIC_WRITE))
+  {
+    VMX_Virtual_Apic_Access_Trap();
   }
 #endif
 
@@ -288,7 +302,8 @@ bx_bool BX_CPU_C::handleAsyncEvent(void)
     VMexit(VMX_VMEXIT_INTERRUPT_WINDOW, 0);
   }
 #endif
-  else if (is_unmasked_event_pending(BX_EVENT_PENDING_INTR | BX_EVENT_PENDING_LAPIC_INTR))
+  else if (is_unmasked_event_pending(BX_EVENT_PENDING_INTR | BX_EVENT_PENDING_LAPIC_INTR |
+           BX_EVENT_PENDING_VMX_VIRTUAL_INTR))
   {
     InterruptAcknowledge();
   }
@@ -369,11 +369,16 @@ BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::RDMSR(bxInstruction_c *i)
 #endif
 
 #if BX_SUPPORT_VMX >= 2
-    if (BX_CPU_THIS_PTR in_vmx_guest && index == 0x808) {
+    if (BX_CPU_THIS_PTR in_vmx_guest) {
       if (SECONDARY_VMEXEC_CONTROL(VMX_VM_EXEC_CTRL3_VIRTUALIZE_X2APIC_MODE)) {
-        RAX = VMX_Read_Virtual_APIC(BX_LAPIC_TPR) & 0xff;
-        RDX = 0;
-        BX_NEXT_INSTR(i);
+        if (index >= 0x800 && index <= 0x8FF) {
+          if (index == 0x808 || SECONDARY_VMEXEC_CONTROL(VMX_VM_EXEC_CTRL3_VIRTUALIZE_APIC_REGISTERS)) {
+            unsigned vapic_offset = (index & 0xff) << 4;
+            RAX = VMX_Read_Virtual_APIC(vapic_offset);
+            RDX = VMX_Read_Virtual_APIC(vapic_offset + 4);
+            BX_NEXT_INSTR(i);
+          }
+        }
       }
     }
 #endif
@@ -859,6 +864,8 @@ BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::WRMSR(bxInstruction_c *i)
     exception(BX_GP_EXCEPTION, 0);
   }
 
+  invalidate_prefetch_q();
+
   Bit64u val_64 = ((Bit64u) EDX << 32) | EAX;
   Bit32u index = ECX;
 
@@ -874,10 +881,10 @@ BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::WRMSR(bxInstruction_c *i)
 #endif
 
 #if BX_SUPPORT_VMX >= 2
-    if (BX_CPU_THIS_PTR in_vmx_guest && index == 0x808) {
+    if (BX_CPU_THIS_PTR in_vmx_guest) {
       if (SECONDARY_VMEXEC_CONTROL(VMX_VM_EXEC_CTRL3_VIRTUALIZE_X2APIC_MODE)) {
-        VMX_Write_VTPR(AL);
-        BX_NEXT_INSTR(i);
+        if (Virtualize_X2APIC_Write(index, val_64))
+          BX_NEXT_INSTR(i);
       }
     }
 #endif
@@ -2013,8 +2013,7 @@ void BX_CPU_C::access_read_physical(bx_phy_address paddr, unsigned len, void *data)
 {
 #if BX_SUPPORT_VMX && BX_SUPPORT_X86_64
   if (is_virtual_apic_page(paddr)) {
-    VMX_Virtual_Apic_Read(paddr, len, data);
-    return;
+    paddr = VMX_Virtual_Apic_Read(paddr, len, data);
   }
 #endif
 
@@ -33,17 +33,40 @@ bx_bool BX_CPP_AttrRegparmN(1) BX_CPU_C::is_virtual_apic_page(bx_phy_address paddr)
   if (BX_CPU_THIS_PTR in_vmx_guest) {
     VMCS_CACHE *vm = &BX_CPU_THIS_PTR vmcs;
     if (SECONDARY_VMEXEC_CONTROL(VMX_VM_EXEC_CTRL3_VIRTUALIZE_APIC_ACCESSES))
-      if (PPFOf(paddr) == PPFOf(vm->apic_access_page)) return 1;
+      if (PPFOf(paddr) == PPFOf(vm->apic_access_page)) return BX_TRUE;
   }
 
-  return 0;
+  return BX_FALSE;
 }
 
+bx_bool BX_CPP_AttrRegparmN(2) BX_CPU_C::virtual_apic_access_vmexit(unsigned offset, unsigned len)
+{
+  if((offset & ~0x3) != ((offset+len-1) & ~0x3)) {
+    BX_ERROR(("Virtual APIC access at offset 0x%08x spans 32-bit boundary !", offset));
+    return BX_TRUE;
+  }
+
+  if (is_pending(BX_EVENT_VMX_VTPR_UPDATE | BX_EVENT_VMX_VEOI_UPDATE | BX_EVENT_VMX_VIRTUAL_APIC_WRITE)) {
+    if (BX_CPU_THIS_PTR vmcs.apic_access != offset) {
+      BX_ERROR(("Second APIC virtualization at offset 0x%08x (first access at offset 0x%08x)", offset, BX_CPU_THIS_PTR vmcs.apic_access));
+      return BX_TRUE;
+    }
+  }
+
+  // access is not instruction fetch because cpu::prefetch will crash them
+  if (! VMEXIT(VMX_VM_EXEC_CTRL2_TPR_SHADOW) || len > 4 || offset >= 0x400)
+    return BX_TRUE;
+
+  BX_CPU_THIS_PTR vmcs.apic_access = offset;
+  return BX_FALSE;
+}
+
 Bit32u BX_CPU_C::VMX_Read_Virtual_APIC(unsigned offset)
 {
   bx_phy_address pAddr = BX_CPU_THIS_PTR vmcs.virtual_apic_page_addr + offset;
   Bit32u field32;
-  access_read_physical(pAddr, 4, (Bit8u*)(&field32));
+  // must avoid recursive call to the function when VMX APIC access page = VMX Virtual Apic Page
+  BX_MEM(0)->readPhysicalPage(BX_CPU_THIS, pAddr, 4, (Bit8u*)(&field32));
+  BX_NOTIFY_PHY_MEMORY_ACCESS(pAddr, 4, BX_READ, BX_VMX_VAPIC_ACCESS, (Bit8u*)(&field32));
   return field32;
 }
@@ -51,53 +74,178 @@ Bit32u BX_CPU_C::VMX_Read_Virtual_APIC(unsigned offset)
 void BX_CPU_C::VMX_Write_Virtual_APIC(unsigned offset, Bit32u val32)
 {
   bx_phy_address pAddr = BX_CPU_THIS_PTR vmcs.virtual_apic_page_addr + offset;
-  access_write_physical(pAddr, 4, (Bit8u*)(&val32));
+  // must avoid recursive call to the function when VMX APIC access page = VMX Virtual Apic Page
+  BX_MEM(0)->writePhysicalPage(BX_CPU_THIS, pAddr, 4, (Bit8u*)(&val32));
+  BX_NOTIFY_PHY_MEMORY_ACCESS(pAddr, 4, BX_WRITE, BX_VMX_VAPIC_ACCESS, (Bit8u*)(&val32));
 }
 
-void BX_CPU_C::VMX_Write_VTPR(Bit8u vtpr)
+void BX_CPU_C::VMX_Write_VICR(void)
 {
-  VMX_Write_Virtual_APIC(BX_LAPIC_TPR, vtpr);
+  Bit32u vicr = VMX_Read_Virtual_APIC(BX_LAPIC_ICR_LO);
 
-  signal_event(BX_EVENT_VMX_VTPR_UPDATE);
+  unsigned dest_shorthand = (vicr >> 18) & 0x3;
+  Bit8u vector = vicr & 0xff;
+
+  // reserved bits (31:20, 17:16, 13), 15 (trigger mode), 12 (delivery status), 10:8 (delivery mode) must be 0
+  // destination shorthand: must be self
+  if (SECONDARY_VMEXEC_CONTROL(VMX_VM_EXEC_CTRL3_VIRTUAL_INT_DELIVERY) &&
+      (vicr & 0xfff3b700) == 0 && (dest_shorthand == 0x1) && vector >= 16)
+  {
+    VMX_Self_IPI_Virtualization(vector);
+  }
+  else {
+    VMexit(VMX_VMEXIT_APIC_WRITE, BX_LAPIC_ICR_LO); // trap-like vmexit
+  }
 }
 
-void BX_CPU_C::VMX_Virtual_Apic_Read(bx_phy_address paddr, unsigned len, void *data)
+bx_phy_address BX_CPU_C::VMX_Virtual_Apic_Read(bx_phy_address paddr, unsigned len, void *data)
 {
   BX_ASSERT(SECONDARY_VMEXEC_CONTROL(VMX_VM_EXEC_CTRL3_VIRTUALIZE_APIC_ACCESSES));
 
   BX_INFO(("Virtual Apic RD 0x" FMT_ADDRX " len = %d", paddr, len));
 
   Bit32u offset = PAGE_OFFSET(paddr);
 
-  // access is not instruction fetch because cpu::prefetch will crash them
-  if (VMEXIT(VMX_VM_EXEC_CTRL2_TPR_SHADOW) && offset == BX_LAPIC_TPR && len <= 4) {
-    // VTPR access
-    Bit32u vtpr = VMX_Read_Virtual_APIC(BX_LAPIC_TPR);
-    if (len == 1)
-      *((Bit8u *) data) = vtpr & 0xff;
-    else if (len == 2)
-      *((Bit16u *) data) = vtpr & 0xffff;
-    else if (len == 4)
-      *((Bit32u *) data) = vtpr;
-    else
-      BX_PANIC(("PANIC: Unsupported Virtual APIC access len = 3 !"));
-    return;
+  bx_bool vmexit = virtual_apic_access_vmexit(offset, len);
+
+  if (! vmexit) {
+
+    if (!SECONDARY_VMEXEC_CONTROL(VMX_VM_EXEC_CTRL3_VIRTUALIZE_APIC_REGISTERS)) {
+      // if 'Virtualize Apic Registers' control is disabled allow only aligned access to VTPR
+      if (offset != BX_LAPIC_TPR) vmexit = 1;
+    }
+
+#if BX_SUPPORT_VMX >= 2
+    switch(offset & 0x3fc) {
+      case BX_LAPIC_ID:
+      case BX_LAPIC_VERSION:
+      case BX_LAPIC_TPR:
+      case BX_LAPIC_EOI:
+      case BX_LAPIC_LDR:
+      case BX_LAPIC_DESTINATION_FORMAT:
+      case BX_LAPIC_SPURIOUS_VECTOR:
+      case BX_LAPIC_ISR1:
+      case BX_LAPIC_ISR2:
+      case BX_LAPIC_ISR3:
+      case BX_LAPIC_ISR4:
+      case BX_LAPIC_ISR5:
+      case BX_LAPIC_ISR6:
+      case BX_LAPIC_ISR7:
+      case BX_LAPIC_ISR8:
+      case BX_LAPIC_TMR1:
+      case BX_LAPIC_TMR2:
+      case BX_LAPIC_TMR3:
+      case BX_LAPIC_TMR4:
+      case BX_LAPIC_TMR5:
+      case BX_LAPIC_TMR6:
+      case BX_LAPIC_TMR7:
+      case BX_LAPIC_TMR8:
+      case BX_LAPIC_IRR1:
+      case BX_LAPIC_IRR2:
+      case BX_LAPIC_IRR3:
+      case BX_LAPIC_IRR4:
+      case BX_LAPIC_IRR5:
+      case BX_LAPIC_IRR6:
+      case BX_LAPIC_IRR7:
+      case BX_LAPIC_IRR8:
+      case BX_LAPIC_ESR:
+      case BX_LAPIC_ICR_LO:
+      case BX_LAPIC_ICR_HI:
+      case BX_LAPIC_LVT_TIMER:
+      case BX_LAPIC_LVT_THERMAL:
+      case BX_LAPIC_LVT_PERFMON:
+      case BX_LAPIC_LVT_LINT0:
+      case BX_LAPIC_LVT_LINT1:
+      case BX_LAPIC_LVT_ERROR:
+      case BX_LAPIC_TIMER_INITIAL_COUNT:
+      case BX_LAPIC_TIMER_DIVIDE_CFG:
+        break;
+
+      default:
+        vmexit = 1;
+        break;
+    }
+#endif
   }
 
-  Bit32u qualification = offset |
-    ((BX_CPU_THIS_PTR in_event) ? VMX_APIC_ACCESS_DURING_EVENT_DELIVERY : VMX_APIC_READ_INSTRUCTION_EXECUTION);
-  VMexit(VMX_VMEXIT_APIC_ACCESS, qualification);
+  if (vmexit) {
+    Bit32u qualification = offset |
+      ((BX_CPU_THIS_PTR in_event) ? VMX_APIC_ACCESS_DURING_EVENT_DELIVERY : VMX_APIC_READ_INSTRUCTION_EXECUTION);
+    VMexit(VMX_VMEXIT_APIC_ACCESS, qualification);
+  }
+
+  // remap access to virtual apic page
+  paddr = BX_CPU_THIS_PTR vmcs.virtual_apic_page_addr + offset;
+  BX_NOTIFY_PHY_MEMORY_ACCESS(paddr, len, BX_READ, BX_VMX_VAPIC_ACCESS, (Bit8u*) data);
+  return paddr;
 }
 
 void BX_CPU_C::VMX_Virtual_Apic_Write(bx_phy_address paddr, unsigned len, void *data)
 {
   BX_ASSERT(SECONDARY_VMEXEC_CONTROL(VMX_VM_EXEC_CTRL3_VIRTUALIZE_APIC_ACCESSES));
 
   BX_INFO(("Virtual Apic WR 0x" FMT_ADDRX " len = %d", paddr, len));
 
   Bit32u offset = PAGE_OFFSET(paddr);
 
-  if (VMEXIT(VMX_VM_EXEC_CTRL2_TPR_SHADOW) && offset == BX_LAPIC_TPR && len <= 4) {
-    // VTPR access
-    VMX_Write_VTPR(*((Bit8u *) data));
-    return;
+  bx_bool vmexit = virtual_apic_access_vmexit(offset, len);
+
+  if (! vmexit) {
+
+    if (offset == BX_LAPIC_TPR) {
+      Bit8u vtpr = *((Bit8u *) data);
+      VMX_Write_Virtual_APIC(BX_LAPIC_TPR, vtpr);
+      signal_event(BX_EVENT_VMX_VTPR_UPDATE);
+      return;
+    }
+
+#if BX_SUPPORT_VMX >= 2
+    if (SECONDARY_VMEXEC_CONTROL(VMX_VM_EXEC_CTRL3_VIRTUAL_INT_DELIVERY)) {
+      if (offset == BX_LAPIC_EOI) {
+        signal_event(BX_EVENT_VMX_VEOI_UPDATE);
+      }
+    }
+
+    switch(offset & 0x3fc) {
+      case BX_LAPIC_ID:
+      case BX_LAPIC_TPR:
+      case BX_LAPIC_ICR_HI:
+      case BX_LAPIC_LDR:
+      case BX_LAPIC_DESTINATION_FORMAT:
+      case BX_LAPIC_SPURIOUS_VECTOR:
+      case BX_LAPIC_ESR:
+      case BX_LAPIC_LVT_TIMER:
+      case BX_LAPIC_LVT_THERMAL:
+      case BX_LAPIC_LVT_PERFMON:
+      case BX_LAPIC_LVT_LINT0:
+      case BX_LAPIC_LVT_LINT1:
+      case BX_LAPIC_LVT_ERROR:
+      case BX_LAPIC_TIMER_INITIAL_COUNT:
+      case BX_LAPIC_TIMER_DIVIDE_CFG:
+        // VMX_VMEXIT_APIC_ACCESS if the control is disabled
+        if (! SECONDARY_VMEXEC_CONTROL(VMX_VM_EXEC_CTRL3_VIRTUALIZE_APIC_REGISTERS)) break;
+        // else fall through
+
+      case BX_LAPIC_EOI:
+      case BX_LAPIC_ICR_LO:
+        // VMX_VMEXIT_APIC_ACCESS if both controls are disabled
+        if (! SECONDARY_VMEXEC_CONTROL(VMX_VM_EXEC_CTRL3_VIRTUAL_INT_DELIVERY) &&
+            ! SECONDARY_VMEXEC_CONTROL(VMX_VM_EXEC_CTRL3_VIRTUALIZE_APIC_REGISTERS)) break;
+        // else fall through
+
+        // remap access to virtual apic page
+        paddr = BX_CPU_THIS_PTR vmcs.virtual_apic_page_addr + offset;
+        // must avoid recursive call to the function when VMX APIC access page = VMX Virtual Apic Page
+        BX_MEM(0)->writePhysicalPage(BX_CPU_THIS, paddr, len, (Bit8u *) data);
+        BX_NOTIFY_PHY_MEMORY_ACCESS(paddr, len, BX_WRITE, BX_VMX_VAPIC_ACCESS, (Bit8u *) data);
+        signal_event(BX_EVENT_VMX_VIRTUAL_APIC_WRITE);
+        return;
+
+      default:
+        break;
+    }
+#endif
   }
 
   Bit32u qualification = offset |
@@ -105,14 +253,256 @@ void BX_CPU_C::VMX_Virtual_Apic_Write(bx_phy_address paddr, unsigned len, void *data)
   VMexit(VMX_VMEXIT_APIC_ACCESS, qualification);
 }
 
-void BX_CPU_C::VMX_TPR_Virtualization(void)
-{
-  Bit8u vtpr = (Bit8u) VMX_Read_Virtual_APIC(BX_LAPIC_TPR);
-
-  Bit8u tpr_shadow = vtpr >> 4;
-  if (tpr_shadow < BX_CPU_THIS_PTR vmcs.vm_tpr_threshold) {
-    VMexit(VMX_VMEXIT_TPR_THRESHOLD, 0);
-  }
-}
+#if BX_SUPPORT_VMX >= 2
+
+BX_CPP_INLINE bx_bool vapic_read_vector(Bit32u *arr, Bit8u vector)
+{
+  unsigned apic_reg = vector / 32;
+
+  return arr[apic_reg] & (1 << (vector & 0x1f));
+}
+
+BX_CPP_INLINE void BX_CPU_C::vapic_set_vector(unsigned arrbase, Bit8u vector)
+{
+  unsigned reg = vector / 32;
+  Bit32u regval = VMX_Read_Virtual_APIC(arrbase + 0x10*reg);
+  regval |= (1 << (vector & 0x1f));
+  VMX_Write_Virtual_APIC(arrbase + 0x10*reg, regval);
+}
+
+BX_CPP_INLINE Bit8u BX_CPU_C::vapic_clear_and_find_highest_priority_int(unsigned arrbase, Bit8u vector)
+{
+  Bit32u arr[8];
+  int n;
+
+  for (n=0;n<8;n++)
+    arr[n] = VMX_Read_Virtual_APIC(arrbase + 0x10*n);
+
+  unsigned reg = vector / 32;
+  arr[reg] &= ~(1 << (vector & 0x1f));
+
+  VMX_Write_Virtual_APIC(arrbase + 0x10*reg, arr[reg]);
+
+  for (n = 7; n >= 0; n--) {
+    if (! arr[n]) continue;
+
+    for (int bit = 31; bit >= 0; bit--) {
+      if (arr[n] & (1<<bit)) {
+        return (n * 32 + bit);
+      }
+    }
+  }
+
+  return 0;
+}
+
+void BX_CPU_C::VMX_Evaluate_Pending_Virtual_Interrupts(void)
+{
+  VMCS_CACHE *vm = &BX_CPU_THIS_PTR vmcs;
+
+  if (! VMEXIT(VMX_VM_EXEC_CTRL2_INTERRUPT_WINDOW_VMEXIT) && (vm->rvi >> 4) > (vm->vppr >> 4))
+  {
+    BX_INFO(("Pending Virtual Interrupt Vector 0x%x", vm->rvi));
+    signal_event(BX_EVENT_PENDING_VMX_VIRTUAL_INTR);
+  }
+  else {
+    BX_INFO(("Clear Virtual Interrupt Vector 0x%x", vm->rvi));
+    clear_event(BX_EVENT_PENDING_VMX_VIRTUAL_INTR);
+  }
+}
+
+#endif
+
+// may be executed as trap-like from handleAsyncEvent and also directy from CR8 write or WRMSR
+void BX_CPU_C::VMX_TPR_Virtualization(void)
+{
+  BX_DEBUG(("Trap Event: VTPR Write Trap"));
+
+  clear_event(BX_EVENT_VMX_VTPR_UPDATE);
+
+#if BX_SUPPORT_VMX >= 2
+  if (SECONDARY_VMEXEC_CONTROL(VMX_VM_EXEC_CTRL3_VIRTUAL_INT_DELIVERY)) {
+    VMX_PPR_Virtualization();
+    VMX_Evaluate_Pending_Virtual_Interrupts();
+  }
+  else
+#endif
+  {
+    Bit8u tpr_shadow = (VMX_Read_Virtual_APIC(BX_LAPIC_TPR) & 0xff) >> 4;
+    if (tpr_shadow < BX_CPU_THIS_PTR vmcs.vm_tpr_threshold) {
+      VMexit(VMX_VMEXIT_TPR_THRESHOLD, 0); // trap-like VMEXIT
+    }
+  }
+}
+
+#if BX_SUPPORT_VMX >= 2
+
+void BX_CPU_C::VMX_PPR_Virtualization(void)
+{
+  VMCS_CACHE *vm = &BX_CPU_THIS_PTR vmcs;
+
+  Bit8u vtpr = (Bit8u) VMX_Read_Virtual_APIC(BX_LAPIC_TPR);
+  Bit8u tpr_shadow = vtpr >> 4;
+
+  if (tpr_shadow >= (vm->svi >> 4))
+    vm->vppr = vtpr;
+  else
+    vm->vppr = vm->svi & 0xf0;
+
+  VMX_Write_Virtual_APIC(BX_LAPIC_PPR, vm->vppr);
+}
+
+// may be executed as trap-like from handleAsyncEvent and also directy from WRMSR
+void BX_CPU_C::VMX_EOI_Virtualization(void)
+{
+  VMCS_CACHE *vm = &BX_CPU_THIS_PTR vmcs;
+
+  BX_DEBUG(("Trap Event: VEOI Write Trap"));
+
+  clear_event(BX_EVENT_VMX_VEOI_UPDATE);
+
+  if (SECONDARY_VMEXEC_CONTROL(VMX_VM_EXEC_CTRL3_VIRTUAL_INT_DELIVERY))
+  {
+    VMX_Write_Virtual_APIC(BX_LAPIC_EOI, 0);
+
+    unsigned vector = vm->svi;
+    vm->svi = vapic_clear_and_find_highest_priority_int(BX_LAPIC_ISR1, vector);
+
+    VMX_PPR_Virtualization();
+
+    if (vapic_read_vector(vm->eoi_exit_bitmap, vector)) {
+      VMexit(VMX_VMEXIT_VIRTUALIZED_EOI, vector); // trap-like VMEXIT
+    }
+    else {
+      VMX_Evaluate_Pending_Virtual_Interrupts();
+    }
+  }
+  else {
+    VMexit(VMX_VMEXIT_APIC_WRITE, BX_LAPIC_EOI); // trap-like vmexit
+  }
+}
+
+// may be executed as trap-like from handleAsyncEvent and also directy from WRMSR
+void BX_CPU_C::VMX_Self_IPI_Virtualization(Bit8u vector)
+{
+  VMCS_CACHE *vm = &BX_CPU_THIS_PTR vmcs;
+
+  vapic_set_vector(BX_LAPIC_IRR1, vector);
+  if (vector >= vm->rvi) vm->rvi = vector;
+
+  VMX_Evaluate_Pending_Virtual_Interrupts();
+}
+
+void BX_CPU_C::VMX_Deliver_Virtual_Interrupt(void)
+{
+  VMCS_CACHE *vm = &BX_CPU_THIS_PTR vmcs;
+
+  Bit8u vector = vm->rvi;
+
+  vapic_set_vector(BX_LAPIC_ISR1, vector);
+
+  vm->svi = vector;
+  vm->vppr = vector & 0xf0;
+  VMX_Write_Virtual_APIC(BX_LAPIC_PPR, vm->vppr);
+  vm->rvi = vapic_clear_and_find_highest_priority_int(BX_LAPIC_IRR1, vector);
+  clear_event(BX_EVENT_PENDING_VMX_VIRTUAL_INTR);
+
+  BX_CPU_THIS_PTR EXT = 1; /* external event */
+
+  BX_INSTR_HWINTERRUPT(BX_CPU_ID, vector,
+      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value, RIP);
+  interrupt(vector, BX_EXTERNAL_INTERRUPT, 0, 0);
+
+  BX_CPU_THIS_PTR prev_rip = RIP; // commit new RIP
+  BX_CPU_THIS_PTR EXT = 0;
+
+  // might be not necessary but cleaner code
+  longjmp(BX_CPU_THIS_PTR jmp_buf_env, 1); // go back to main decode loop
+}
+
+#endif // BX_SUPPORT_VMX >= 2
+
+// executed as trap-like from handleAsyncEvent
+void BX_CPU_C::VMX_Virtual_Apic_Access_Trap(void)
+{
+  clear_event(BX_EVENT_VMX_VIRTUAL_APIC_WRITE);
+
+  if (is_pending(BX_EVENT_VMX_VTPR_UPDATE)) {
+    VMX_TPR_Virtualization();
+  }
+#if BX_SUPPORT_VMX >= 2
+  else if (is_pending(BX_EVENT_VMX_VEOI_UPDATE)) {
+    VMX_EOI_Virtualization();
+  }
+  else {
+    unsigned apic_offset = BX_CPU_THIS_PTR vmcs.apic_access;
+
+    BX_DEBUG(("Trap Event: Virtual Apic Access Trap offset = %08x", apic_offset));
+
+    if (apic_offset >= BX_LAPIC_ICR_HI && apic_offset <= BX_LAPIC_ICR_HI+3) {
+      // clear bits (2:0) of VICR_HI, no VMexit should happen
+      BX_DEBUG(("Virtual Apic Access Trap: Clearing ICR_HI[23:0]"));
+      Bit32u vicr_hi = VMX_Read_Virtual_APIC(BX_LAPIC_ICR_HI);
+      VMX_Write_Virtual_APIC(BX_LAPIC_ICR_HI, vicr_hi & 0xff000000);
+    }
+    else if (apic_offset == BX_LAPIC_ICR_LO) {
+      VMX_Write_VICR();
+    }
+    else {
+      VMexit(VMX_VMEXIT_APIC_WRITE, apic_offset); // trap-like vmexit
+    }
+  }
+#endif
+
+  longjmp(BX_CPU_THIS_PTR jmp_buf_env, 1); // go back to main decode loop
+}
+
+bx_bool BX_CPU_C::Virtualize_X2APIC_Write(unsigned msr, Bit64u val_64)
+{
+  if (msr == 0x808) {
+    if ((val_64 >> 8) != 0)
+      exception(BX_GP_EXCEPTION, 0);
+
+    VMX_Write_Virtual_APIC(BX_LAPIC_TPR, val_64 & 0xff);
+    VMX_Write_Virtual_APIC(BX_LAPIC_TPR + 4, 0);
+    VMX_TPR_Virtualization();
+
+    return BX_TRUE;
+  }
+
+#if BX_SUPPORT_VMX >= 2
+  if (SECONDARY_VMEXEC_CONTROL(VMX_VM_EXEC_CTRL3_VIRTUAL_INT_DELIVERY)) {
+    if (msr == 0x80b) {
+      // EOI virtualization
+      if (val_64 != 0)
+        exception(BX_GP_EXCEPTION, 0);
+
+      VMX_EOI_Virtualization();
+
+      return BX_TRUE;
+    }
+
+    if (msr == 0x83f) {
+      // Self IPI virtualization
+      if ((val_64 >> 8) != 0)
+        exception(BX_GP_EXCEPTION, 0);
+
+      Bit8u vector = val_64 & 0xff;
+      if (vector < 16) {
+        VMX_Write_Virtual_APIC(BX_LAPIC_SELF_IPI, vector);
+        VMX_Write_Virtual_APIC(BX_LAPIC_SELF_IPI + 4, 0);
+        VMexit(VMX_VMEXIT_APIC_WRITE, BX_LAPIC_SELF_IPI); // trap-like vmexit
+      }
+      else {
+        VMX_Self_IPI_Virtualization(vector);
+      }
+
+      return BX_TRUE;
+    }
+  }
+#endif
+
+  return BX_FALSE;
+}
 
 #endif // BX_SUPPORT_VMX && BX_SUPPORT_X86_64
@@ -110,6 +110,11 @@ bx_bool BX_CPU_C::vmcs_field_supported(Bit32u encoding)
     case VMCS_16BIT_GUEST_TR_SELECTOR:
       return 1;
 
+#if BX_SUPPORT_VMX >= 2
+    case VMCS_16BIT_GUEST_INTERRUPT_STATUS:
+      return BX_SUPPORT_VMX_EXTENSION(BX_VMX_VINTR_DELIVERY);
+#endif
+
     /* VMCS 16-bit host-state fields */
     /* binary 0000_11xx_xxxx_xxx0 */
     case VMCS_16BIT_HOST_ES_SELECTOR:
@@ -238,6 +243,16 @@ bx_bool BX_CPU_C::vmcs_field_supported(Bit32u encoding)
 #endif
 
 #if BX_SUPPORT_VMX >= 2
+    case VMCS_64BIT_CONTROL_EOI_EXIT_BITMAP0:
+    case VMCS_64BIT_CONTROL_EOI_EXIT_BITMAP0_HI:
+    case VMCS_64BIT_CONTROL_EOI_EXIT_BITMAP1:
+    case VMCS_64BIT_CONTROL_EOI_EXIT_BITMAP1_HI:
+    case VMCS_64BIT_CONTROL_EOI_EXIT_BITMAP2:
+    case VMCS_64BIT_CONTROL_EOI_EXIT_BITMAP2_HI:
+    case VMCS_64BIT_CONTROL_EOI_EXIT_BITMAP3:
+    case VMCS_64BIT_CONTROL_EOI_EXIT_BITMAP3_HI:
+      return BX_SUPPORT_VMX_EXTENSION(BX_VMX_VINTR_DELIVERY);
+
     case VMCS_64BIT_CONTROL_EPTPTR:
     case VMCS_64BIT_CONTROL_EPTPTR_HI:
       return BX_SUPPORT_VMX_EXTENSION(BX_VMX_EPT);
@@ -515,6 +530,8 @@ void BX_CPU_C::init_vmx_capabilities(void)
 #if BX_SUPPORT_VMX >= 2
   if (BX_SUPPORT_VMX_EXTENSION(BX_VMX_UNRESTRICTED_GUEST))
     cap->vmx_vmexec_ctrl2_supported_bits |= VMX_VM_EXEC_CTRL3_UNRESTRICTED_GUEST;
+  if (BX_SUPPORT_VMX_EXTENSION(BX_VMX_VINTR_DELIVERY))
+    cap->vmx_vmexec_ctrl2_supported_bits |= VMX_VM_EXEC_CTRL3_VIRTUALIZE_APIC_REGISTERS | VMX_VM_EXEC_CTRL3_VIRTUAL_INT_DELIVERY;
   if (BX_SUPPORT_VMX_EXTENSION(BX_VMX_PAUSE_LOOP_EXITING))
     cap->vmx_vmexec_ctrl2_supported_bits |= VMX_VM_EXEC_CTRL3_PAUSE_LOOP_VMEXIT;
   if (BX_CPUID_SUPPORT_ISA_EXTENSION(BX_ISA_INVPCID))
@@ -165,7 +165,7 @@ void BX_CPU_C::VMexit_PAUSE(void)
 
 void BX_CPU_C::VMexit_ExtInterrupt(void)
 {
-  if (! BX_CPU_THIS_PTR in_vmx_guest) return;
+  BX_ASSERT(BX_CPU_THIS_PTR in_vmx_guest);
 
   if (PIN_VMEXIT(VMX_VM_EXEC_CTRL1_EXTERNAL_INTERRUPT_VMEXIT)) {
     VMCS_CACHE *vm = &BX_CPU_THIS_PTR vmcs;
bochs/cpu/vmx.cc
@@ -359,6 +359,26 @@ bx_bool BX_CPU_C::is_eptptr_valid(Bit64u eptptr)
 }
 #endif
 
+BX_CPP_INLINE Bit32u rotate_r(Bit32u val_32)
+{
+  return (val_32 >> 8) | (val_32 << 24);
+}
+
+BX_CPP_INLINE Bit32u rotate_l(Bit32u val_32)
+{
+  return (val_32 << 8) | (val_32 >> 24);
+}
+
+BX_CPP_INLINE Bit32u vmx_from_ar_byte_rd(Bit32u ar_byte)
+{
+  return rotate_r(ar_byte);
+}
+
+BX_CPP_INLINE Bit32u vmx_from_ar_byte_wr(Bit32u ar_byte)
+{
+  return rotate_l(ar_byte);
+}
+
 ////////////////////////////////////////////////////////////
 // VMenter
 ////////////////////////////////////////////////////////////
@@ -486,26 +506,56 @@ VMX_error_code BX_CPU_C::VMenterLoadCheckVmControls(void)
 #if BX_SUPPORT_X86_64
   if (vm->vmexec_ctrls2 & VMX_VM_EXEC_CTRL2_TPR_SHADOW) {
     vm->virtual_apic_page_addr = (bx_phy_address) VMread64(VMCS_64BIT_CONTROL_VIRTUAL_APIC_PAGE_ADDR);
-    vm->vm_tpr_threshold = VMread32(VMCS_32BIT_CONTROL_TPR_THRESHOLD);
 
     if ((vm->virtual_apic_page_addr & 0xfff) != 0 || ! IsValidPhyAddr(vm->virtual_apic_page_addr)) {
       BX_ERROR(("VMFAIL: VMCS EXEC CTRL: virtual apic phy addr malformed"));
       return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
     }
 
-    if (vm->vm_tpr_threshold & 0xfffffff0) {
-      BX_ERROR(("VMFAIL: VMCS EXEC CTRL: TPR threshold too big"));
-      return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
-    }
-
-    if (! (vm->vmexec_ctrls3 & VMX_VM_EXEC_CTRL3_VIRTUALIZE_APIC_ACCESSES)) {
-      Bit8u tpr_shadow = (VMX_Read_Virtual_APIC(BX_LAPIC_TPR) >> 4) & 0xf;
-      if (vm->vm_tpr_threshold > tpr_shadow) {
-        BX_ERROR(("VMFAIL: VMCS EXEC CTRL: TPR threshold > TPR shadow"));
-        return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
+#if BX_SUPPORT_VMX >= 2
+    if (vm->vmexec_ctrls3 & VMX_VM_EXEC_CTRL3_VIRTUAL_INT_DELIVERY) {
+      if (! PIN_VMEXIT(VMX_VM_EXEC_CTRL1_EXTERNAL_INTERRUPT_VMEXIT)) {
+        BX_ERROR(("VMFAIL: VMCS EXEC CTRL: virtual interrupt delivery must be set together with external interrupt exiting"));
+        return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
+      }
+
+      for (int reg = 0; reg < 8; reg++) {
+        vm->eoi_exit_bitmap[reg] = VMread32(VMCS_64BIT_CONTROL_EOI_EXIT_BITMAP0 + reg);
+      }
+
+      Bit16u guest_interrupt_status = VMread16(VMCS_16BIT_GUEST_INTERRUPT_STATUS);
+      vm->rvi = guest_interrupt_status & 0xff;
+      vm->svi = guest_interrupt_status >> 8;
+    }
+    else
+#endif
+    {
+      vm->vm_tpr_threshold = VMread32(VMCS_32BIT_CONTROL_TPR_THRESHOLD);
+
+      if (vm->vm_tpr_threshold & 0xfffffff0) {
+        BX_ERROR(("VMFAIL: VMCS EXEC CTRL: TPR threshold too big"));
+        return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
+      }
+
+      if (! (vm->vmexec_ctrls3 & VMX_VM_EXEC_CTRL3_VIRTUALIZE_APIC_ACCESSES)) {
+        Bit8u tpr_shadow = (VMX_Read_Virtual_APIC(BX_LAPIC_TPR) >> 4) & 0xf;
+        if (vm->vm_tpr_threshold > tpr_shadow) {
+          BX_ERROR(("VMFAIL: VMCS EXEC CTRL: TPR threshold > TPR shadow"));
+          return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
+        }
       }
     }
   }
+#if BX_SUPPORT_VMX >= 2
+  else { // TPR shadow is disabled
+    if (vm->vmexec_ctrls3 & (VMX_VM_EXEC_CTRL3_VIRTUALIZE_X2APIC_MODE |
+                             VMX_VM_EXEC_CTRL3_VIRTUALIZE_APIC_REGISTERS |
+                             VMX_VM_EXEC_CTRL3_VIRTUAL_INT_DELIVERY))
+    {
+      BX_ERROR(("VMFAIL: VMCS EXEC CTRL: apic virtualization is enabled without TPR shadow"));
+      return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
+    }
+  }
+#endif // BX_SUPPORT_VMX >= 2
 
   if (vm->vmexec_ctrls3 & VMX_VM_EXEC_CTRL3_VIRTUALIZE_APIC_ACCESSES) {
     vm->apic_access_page = (bx_phy_address) VMread64(VMCS_64BIT_CONTROL_APIC_ACCESS_ADDR);
@@ -513,19 +563,16 @@ VMX_error_code BX_CPU_C::VMenterLoadCheckVmControls(void)
       BX_ERROR(("VMFAIL: VMCS EXEC CTRL: apic access page phy addr malformed"));
       return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
     }
-  }
-#endif
 
-#if BX_SUPPORT_VMX >= 2
-  if (vm->vmexec_ctrls3 & VMX_VM_EXEC_CTRL3_VIRTUALIZE_X2APIC_MODE) {
-    // 'use TPR shadow' must be set and "virtualize APIC accesses" must be clear
-    if (!(vm->vmexec_ctrls2 & VMX_VM_EXEC_CTRL2_TPR_SHADOW) ||
-        (vm->vmexec_ctrls3 & VMX_VM_EXEC_CTRL3_VIRTUALIZE_APIC_ACCESSES)) {
-      BX_ERROR(("VMFAIL: VMCS EXEC CTRL: virtualize X2APIC mode misconfigured"));
-      return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
-    }
+#if BX_SUPPORT_VMX >= 2
+    if (vm->vmexec_ctrls3 & VMX_VM_EXEC_CTRL3_VIRTUALIZE_X2APIC_MODE) {
+      BX_ERROR(("VMFAIL: VMCS EXEC CTRL: virtualize X2APIC mode enabled together with APIC access virtualization"));
+      return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
+    }
+#endif
   }
 
+#if BX_SUPPORT_VMX >= 2
   if (vm->vmexec_ctrls3 & VMX_VM_EXEC_CTRL3_EPT_ENABLE) {
     vm->eptptr = (bx_phy_address) VMread64(VMCS_64BIT_CONTROL_EPTPTR);
     if (! is_eptptr_valid(vm->eptptr)) {
@@ -578,6 +625,8 @@ VMX_error_code BX_CPU_C::VMenterLoadCheckVmControls(void)
   }
 #endif
 
+#endif // BX_SUPPORT_X86_64
+
   //
   // Load VM-exit control fields to VMCS Cache
   //
|
||||
Bit16u selector = VMread16(VMCS_16BIT_GUEST_ES_SELECTOR + 2*n);
|
||||
bx_address base = (bx_address) VMread_natural(VMCS_GUEST_ES_BASE + 2*n);
|
||||
Bit32u limit = VMread32(VMCS_32BIT_GUEST_ES_LIMIT + 2*n);
|
||||
Bit32u ar = VMread32(VMCS_32BIT_GUEST_ES_ACCESS_RIGHTS + 2*n) >> 8;
|
||||
Bit32u ar = VMread32(VMCS_32BIT_GUEST_ES_ACCESS_RIGHTS + 2*n);
|
||||
ar = vmx_from_ar_byte_rd(ar);
|
||||
bx_bool invalid = (ar >> 16) & 1;
|
||||
|
||||
set_segment_ar_data(&guest.sregs[n], !invalid,
|
||||
@@ -1308,7 +1358,8 @@ Bit32u BX_CPU_C::VMenterLoadCheckGuestState(Bit64u *qualification)
   Bit16u ldtr_selector = VMread16(VMCS_16BIT_GUEST_LDTR_SELECTOR);
   Bit64u ldtr_base = VMread_natural(VMCS_GUEST_LDTR_BASE);
   Bit32u ldtr_limit = VMread32(VMCS_32BIT_GUEST_LDTR_LIMIT);
-  Bit32u ldtr_ar = VMread32(VMCS_32BIT_GUEST_LDTR_ACCESS_RIGHTS) >> 8;
+  Bit32u ldtr_ar = VMread32(VMCS_32BIT_GUEST_LDTR_ACCESS_RIGHTS);
+  ldtr_ar = vmx_from_ar_byte_rd(ldtr_ar);
   bx_bool ldtr_invalid = (ldtr_ar >> 16) & 1;
   if (set_segment_ar_data(&guest.ldtr, !ldtr_invalid,
        (Bit16u) ldtr_selector, ldtr_base, ldtr_limit, (Bit16u)(ldtr_ar)))
@@ -1319,7 +1370,7 @@ Bit32u BX_CPU_C::VMenterLoadCheckGuestState(Bit64u *qualification)
     return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
   }
   if (guest.ldtr.cache.type != BX_SYS_SEGMENT_LDT) {
-    BX_ERROR(("VMENTER FAIL: VMCS guest incorrect LDTR type"));
+    BX_ERROR(("VMENTER FAIL: VMCS guest incorrect LDTR type (%d)", guest.ldtr.cache.type));
     return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
   }
   if (guest.ldtr.cache.segment) {
@@ -1349,7 +1400,8 @@ Bit32u BX_CPU_C::VMenterLoadCheckGuestState(Bit64u *qualification)
   Bit16u tr_selector = VMread16(VMCS_16BIT_GUEST_TR_SELECTOR);
   Bit64u tr_base = VMread_natural(VMCS_GUEST_TR_BASE);
   Bit32u tr_limit = VMread32(VMCS_32BIT_GUEST_TR_LIMIT);
-  Bit32u tr_ar = VMread32(VMCS_32BIT_GUEST_TR_ACCESS_RIGHTS) >> 8;
+  Bit32u tr_ar = VMread32(VMCS_32BIT_GUEST_TR_ACCESS_RIGHTS);
+  tr_ar = vmx_from_ar_byte_rd(tr_ar);
   bx_bool tr_invalid = (tr_ar >> 16) & 1;
 
 #if BX_SUPPORT_X86_64
@@ -1379,8 +1431,8 @@ Bit32u BX_CPU_C::VMenterLoadCheckGuestState(Bit64u *qualification)
     return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
   }
   if (! IsLimitAccessRightsConsistent(tr_limit, tr_ar)) {
-     BX_ERROR(("VMENTER FAIL: VMCS guest TR.AR/LIMIT malformed"));
-     return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
+    BX_ERROR(("VMENTER FAIL: VMCS guest TR.AR/LIMIT malformed"));
+    return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
   }
 
   switch(guest.tr.cache.type) {
@@ -1590,9 +1642,9 @@ Bit32u BX_CPU_C::VMenterLoadCheckGuestState(Bit64u *qualification)
 #if BX_SUPPORT_X86_64
 #if BX_SUPPORT_VMX >= 2
   // modify EFER.LMA / EFER.LME before setting CR4
 
   // It is recommended that 64-bit VMM software use the 1-settings of the "load IA32_EFER"
   // VM entry control and the "save IA32_EFER" VM-exit control. If VMentry is establishing
   // CR0.PG=0 and if the "IA-32e mode guest" and "load IA32_EFER" VM entry controls are
   // both 0, VM entry leaves IA32_EFER.LME unmodified (i.e., the host value will persist
   // in the guest) -- Quote from Intel SDM
@@ -1902,12 +1954,13 @@ void BX_CPU_C::VMexitSaveGuestState(void)
     bx_bool invalid = !BX_CPU_THIS_PTR sregs[n].cache.valid;
     bx_address base = BX_CPU_THIS_PTR sregs[n].cache.u.segment.base;
     Bit32u limit = BX_CPU_THIS_PTR sregs[n].cache.u.segment.limit_scaled;
-    Bit32u ar = get_descriptor_h(&BX_CPU_THIS_PTR sregs[n].cache) & 0x00f0ff00;
+    Bit32u ar = (get_descriptor_h(&BX_CPU_THIS_PTR sregs[n].cache) & 0x00f0ff00) >> 8;
+    ar = vmx_from_ar_byte_wr(ar | (invalid << 16));
 
     VMwrite16(VMCS_16BIT_GUEST_ES_SELECTOR + 2*n, selector);
+    VMwrite32(VMCS_32BIT_GUEST_ES_ACCESS_RIGHTS + 2*n, ar);
     VMwrite_natural(VMCS_GUEST_ES_BASE + 2*n, base);
     VMwrite32(VMCS_32BIT_GUEST_ES_LIMIT + 2*n, limit);
-    VMwrite32(VMCS_32BIT_GUEST_ES_ACCESS_RIGHTS + 2*n, ar | (invalid << 24));
   }
 
   // save guest LDTR
@@ -1915,24 +1968,26 @@ void BX_CPU_C::VMexitSaveGuestState(void)
   bx_bool ldtr_invalid = !BX_CPU_THIS_PTR ldtr.cache.valid;
   bx_address ldtr_base = BX_CPU_THIS_PTR ldtr.cache.u.segment.base;
   Bit32u ldtr_limit = BX_CPU_THIS_PTR ldtr.cache.u.segment.limit_scaled;
-  Bit32u ldtr_ar = get_descriptor_h(&BX_CPU_THIS_PTR ldtr.cache) & 0x00f0ff00;
+  Bit32u ldtr_ar = (get_descriptor_h(&BX_CPU_THIS_PTR ldtr.cache) & 0x00f0ff00) >> 8;
+  ldtr_ar = vmx_from_ar_byte_wr(ldtr_ar | (ldtr_invalid << 16));
 
   VMwrite16(VMCS_16BIT_GUEST_LDTR_SELECTOR, ldtr_selector);
+  VMwrite32(VMCS_32BIT_GUEST_LDTR_ACCESS_RIGHTS, ldtr_ar);
   VMwrite_natural(VMCS_GUEST_LDTR_BASE, ldtr_base);
   VMwrite32(VMCS_32BIT_GUEST_LDTR_LIMIT, ldtr_limit);
-  VMwrite32(VMCS_32BIT_GUEST_LDTR_ACCESS_RIGHTS, ldtr_ar | (ldtr_invalid << 24));
 
   // save guest TR
   Bit32u tr_selector = BX_CPU_THIS_PTR tr.selector.value;
   bx_bool tr_invalid = !BX_CPU_THIS_PTR tr.cache.valid;
   bx_address tr_base = BX_CPU_THIS_PTR tr.cache.u.segment.base;
   Bit32u tr_limit = BX_CPU_THIS_PTR tr.cache.u.segment.limit_scaled;
-  Bit32u tr_ar = get_descriptor_h(&BX_CPU_THIS_PTR tr.cache) & 0x00f0ff00;
+  Bit32u tr_ar = (get_descriptor_h(&BX_CPU_THIS_PTR tr.cache) & 0x00f0ff00) >> 8;
+  tr_ar = vmx_from_ar_byte_wr(tr_ar | (tr_invalid << 16));
 
   VMwrite16(VMCS_16BIT_GUEST_TR_SELECTOR, tr_selector);
+  VMwrite32(VMCS_32BIT_GUEST_TR_ACCESS_RIGHTS, tr_ar);
   VMwrite_natural(VMCS_GUEST_TR_BASE, tr_base);
   VMwrite32(VMCS_32BIT_GUEST_TR_LIMIT, tr_limit);
-  VMwrite32(VMCS_32BIT_GUEST_TR_ACCESS_RIGHTS, tr_ar | (tr_invalid << 24));
 
   VMwrite_natural(VMCS_GUEST_GDTR_BASE, BX_CPU_THIS_PTR gdtr.base);
   VMwrite32(VMCS_32BIT_GUEST_GDTR_LIMIT, BX_CPU_THIS_PTR gdtr.limit);
@@ -1987,6 +2042,10 @@ void BX_CPU_C::VMexitSaveGuestState(void)
   // Store back to VMCS
   if (vm->vmexit_ctrls & VMX_VMEXIT_CTRL1_STORE_VMX_PREEMPTION_TIMER)
     VMwrite32(VMCS_32BIT_GUEST_PREEMPTION_TIMER_VALUE, BX_CPU_THIS_PTR lapic.read_vmx_preemption_timer());
+
+  if (vm->vmexec_ctrls3 & VMX_VM_EXEC_CTRL3_VIRTUAL_INT_DELIVERY) {
+    VMwrite16(VMCS_16BIT_GUEST_INTERRUPT_STATUS, (((Bit16u) vm->svi) << 8) | vm->rvi);
+  }
 #endif
 }
 
@@ -2212,9 +2271,11 @@ void BX_CPU_C::VMexit(Bit32u reason, Bit64u qualification)
   }
 
   // VMEXITs are FAULT-like: restore RIP/RSP to value before VMEXIT occurred
-  RIP = BX_CPU_THIS_PTR prev_rip;
-  if (BX_CPU_THIS_PTR speculative_rsp)
-    RSP = BX_CPU_THIS_PTR prev_rsp;
+  if (! IS_TRAP_LIKE_VMEXIT(reason)) {
+    RIP = BX_CPU_THIS_PTR prev_rip;
+    if (BX_CPU_THIS_PTR speculative_rsp)
+      RSP = BX_CPU_THIS_PTR prev_rsp;
+  }
 
   //
   // STEP 1: Saving Guest State to VMCS
@@ -2236,10 +2297,13 @@ void BX_CPU_C::VMexit(Bit32u reason, Bit64u qualification)
 
   // entering VMX root mode: clear possibly pending guest VMX events
   clear_event(BX_EVENT_VMX_VTPR_UPDATE |
+              BX_EVENT_VMX_VEOI_UPDATE |
+              BX_EVENT_VMX_VIRTUAL_APIC_WRITE |
               BX_EVENT_VMX_MONITOR_TRAP_FLAG |
               BX_EVENT_VMX_INTERRUPT_WINDOW_EXITING |
               BX_EVENT_VMX_PREEMPTION_TIMER_EXPIRED |
-              BX_EVENT_VMX_NMI_WINDOW_EXITING);
+              BX_EVENT_VMX_NMI_WINDOW_EXITING |
+              BX_EVENT_PENDING_VMX_VIRTUAL_INTR);
 
   //
   // STEP 2: Load Host State
@@ -2272,8 +2336,7 @@ void BX_CPU_C::VMexit(Bit32u reason, Bit64u qualification)
   }
 #endif
 
-  // skip the longjmp for TRAP-like VMEXITs
-  if (reason != VMX_VMEXIT_TPR_THRESHOLD) {
+  if (! IS_TRAP_LIKE_VMEXIT(reason)) {
     longjmp(BX_CPU_THIS_PTR jmp_buf_env, 1); // go back to main decode loop
   }
 }
@@ -2629,6 +2692,17 @@ BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::VMLAUNCH(bxInstruction_c *i)
   ///////////////////////////////////////////////////////
 
   VMenterInjectEvents();
+
+  // - When virtual-interrupt-delivery is set this will cause PPR virtualization
+  //   followed by Virtual Interrupt Evaluation
+  // - When use TPR shadow together with Virtualize APIC Access are set this would
+  //   cause TPR threshold check
+  // - When Virtualize APIC Access is disabled the code would pass through TPR
+  //   threshold check but no VMExit would occur (otherwise VMEntry should fail
+  //   consistency checks before).
+  if (VMEXIT(VMX_VM_EXEC_CTRL2_TPR_SHADOW)) {
+    VMX_TPR_Virtualization();
+  }
 #endif
 
   BX_NEXT_TRACE(i);
@@ -2700,16 +2774,6 @@ BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::VMPTRST(bxInstruction_c *i)
   BX_NEXT_INSTR(i);
 }
 
-BX_CPP_INLINE Bit32u rotate_r(Bit32u val_32)
-{
-  return (val_32 >> 8) | (val_32 << 24);
-}
-
-BX_CPP_INLINE Bit32u rotate_l(Bit32u val_32)
-{
-  return (val_32 << 8) | (val_32 >> 24);
-}
-
 BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::VMREAD_EdGd(bxInstruction_c *i)
 {
 #if BX_SUPPORT_VMX
@@ -2757,7 +2821,7 @@ BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::VMREAD_EdGd(bxInstruction_c *i)
   else if(width == VMCS_FIELD_WIDTH_32BIT) {
     // the real hardware write access rights rotated
     if (encoding >= VMCS_32BIT_GUEST_ES_ACCESS_RIGHTS && encoding <= VMCS_32BIT_GUEST_TR_ACCESS_RIGHTS)
-      field_64 = rotate_r(VMread32(encoding));
+      field_64 = vmx_from_ar_byte_rd(VMread32(encoding));
     else
       field_64 = VMread32(encoding);
   }
@@ -2880,7 +2944,7 @@ BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::VMWRITE_GdEd(bxInstruction_c *i)
   else if(width == VMCS_FIELD_WIDTH_32BIT) {
     // the real hardware write access rights rotated
     if (encoding >= VMCS_32BIT_GUEST_ES_ACCESS_RIGHTS && encoding <= VMCS_32BIT_GUEST_TR_ACCESS_RIGHTS)
-      VMwrite32(encoding, rotate_l(val_32));
+      VMwrite32(encoding, vmx_from_ar_byte_wr(val_32));
     else
       VMwrite32(encoding, val_32);
   }
@@ -3258,6 +3322,20 @@ void BX_CPU_C::register_vmx_state(bx_param_c *parent)
   BXRS_HEX_PARAM_FIELD(vmexec_ctrls, first_pause_time, BX_CPU_THIS_PTR vmcs.first_pause_time);
   BXRS_HEX_PARAM_FIELD(vmexec_ctrls, last_pause_time, BX_CPU_THIS_PTR vmcs.last_pause_time);
 #endif
+#if BX_SUPPORT_VMX >= 2
+  BXRS_HEX_PARAM_FIELD(vmexec_ctrls, svi, BX_CPU_THIS_PTR vmcs.svi);
+  BXRS_HEX_PARAM_FIELD(vmexec_ctrls, rvi, BX_CPU_THIS_PTR vmcs.rvi);
+  BXRS_HEX_PARAM_FIELD(vmexec_ctrls, vppr, BX_CPU_THIS_PTR vmcs.vppr);
+  BXRS_HEX_PARAM_FIELD(vmexec_ctrls, apic_access, BX_CPU_THIS_PTR vmcs.apic_access);
+  BXRS_HEX_PARAM_FIELD(vmexec_ctrls, eoi_exit_bitmap0, BX_CPU_THIS_PTR vmcs.eoi_exit_bitmap[0]);
+  BXRS_HEX_PARAM_FIELD(vmexec_ctrls, eoi_exit_bitmap1, BX_CPU_THIS_PTR vmcs.eoi_exit_bitmap[1]);
+  BXRS_HEX_PARAM_FIELD(vmexec_ctrls, eoi_exit_bitmap2, BX_CPU_THIS_PTR vmcs.eoi_exit_bitmap[2]);
+  BXRS_HEX_PARAM_FIELD(vmexec_ctrls, eoi_exit_bitmap3, BX_CPU_THIS_PTR vmcs.eoi_exit_bitmap[3]);
+  BXRS_HEX_PARAM_FIELD(vmexec_ctrls, eoi_exit_bitmap4, BX_CPU_THIS_PTR vmcs.eoi_exit_bitmap[4]);
+  BXRS_HEX_PARAM_FIELD(vmexec_ctrls, eoi_exit_bitmap5, BX_CPU_THIS_PTR vmcs.eoi_exit_bitmap[5]);
+  BXRS_HEX_PARAM_FIELD(vmexec_ctrls, eoi_exit_bitmap6, BX_CPU_THIS_PTR vmcs.eoi_exit_bitmap[6]);
+  BXRS_HEX_PARAM_FIELD(vmexec_ctrls, eoi_exit_bitmap7, BX_CPU_THIS_PTR vmcs.eoi_exit_bitmap[7]);
+#endif
 
   //
   // VM-Exit Control Fields
@@ -136,6 +136,11 @@ enum VMX_vmexit_reason {
   VMX_VMEXIT_LAST_REASON
 };
 
+#define IS_TRAP_LIKE_VMEXIT(reason) \
+      (reason == VMX_VMEXIT_TPR_THRESHOLD || \
+       reason == VMX_VMEXIT_VIRTUALIZED_EOI || \
+       reason == VMX_VMEXIT_APIC_WRITE)
+
 // VMexit on CR register access
 enum {
   VMX_VMEXIT_CR_ACCESS_CR_WRITE = 0,
@@ -649,6 +654,16 @@ typedef struct bx_VMCS
   Bit32u first_pause_time;
 #endif
 
+#if BX_SUPPORT_VMX >= 2
+  Bit8u svi; /* Servicing Virtual Interrupt */
+  Bit8u rvi; /* Requesting Virtual Interrupt */
+  Bit8u vppr;
+
+  Bit32u eoi_exit_bitmap[8];
+
+  unsigned apic_access;
+#endif
+
   //
   // VM-Exit Control Fields
   //