diff --git a/bochs/cpu/cpu.h b/bochs/cpu/cpu.h
index 03a930175..77c401e3e 100644
--- a/bochs/cpu/cpu.h
+++ b/bochs/cpu/cpu.h
@@ -4336,7 +4336,7 @@ public: // for now...
   BX_SMF VMX_error_code VMenterLoadCheckHostState(void);
   BX_SMF Bit32u VMenterLoadCheckGuestState(Bit64u *qualification);
   BX_SMF void VMenterInjectEvents(void);
-  BX_SMF void VMexit(bxInstruction_c *i, Bit32u reason, Bit64u qualification);
+  BX_SMF void VMexit(Bit32u reason, Bit64u qualification);
   BX_SMF void VMexitSaveGuestState(void);
   BX_SMF void VMexitSaveGuestMSRs(void);
   BX_SMF void VMexitLoadHostState(void);
@@ -4357,17 +4357,17 @@ public: // for now...
 #endif
   // vmexit reasons
   BX_SMF void VMexit_Instruction(bxInstruction_c *i, Bit32u reason) BX_CPP_AttrRegparmN(2);
-  BX_SMF void VMexit_Event(bxInstruction_c *i, unsigned type, unsigned vector,
+  BX_SMF void VMexit_Event(unsigned type, unsigned vector,
       Bit16u errcode, bx_bool errcode_valid, Bit64u qualification = 0);
   BX_SMF void VMexit_TripleFault(void);
   BX_SMF void VMexit_ExtInterrupt(void);
-  BX_SMF void VMexit_TaskSwitch(bxInstruction_c *i, Bit16u tss_selector, unsigned source) BX_CPP_AttrRegparmN(3);
-  BX_SMF void VMexit_PAUSE(bxInstruction_c *i) BX_CPP_AttrRegparmN(1);
+  BX_SMF void VMexit_TaskSwitch(Bit16u tss_selector, unsigned source) BX_CPP_AttrRegparmN(2);
+  BX_SMF void VMexit_PAUSE(void);
 #if BX_SUPPORT_VMX >= 2
   BX_SMF void VMexit_PreemptionTimerExpired(void);
 #endif
-  BX_SMF bx_bool VMexit_CLTS(bxInstruction_c *i) BX_CPP_AttrRegparmN(1);
-  BX_SMF void VMexit_MSR(bxInstruction_c *i, unsigned op, Bit32u msr) BX_CPP_AttrRegparmN(3);
+  BX_SMF bx_bool VMexit_CLTS(void);
+  BX_SMF void VMexit_MSR(unsigned op, Bit32u msr) BX_CPP_AttrRegparmN(2);
   BX_SMF void VMexit_IO(bxInstruction_c *i, unsigned port, unsigned len) BX_CPP_AttrRegparmN(3);
   BX_SMF Bit32u VMexit_LMSW(bxInstruction_c *i, Bit32u msw) BX_CPP_AttrRegparmN(2);
   BX_SMF bx_address VMexit_CR0_Write(bxInstruction_c *i, bx_address) BX_CPP_AttrRegparmN(2);
@@ -4378,7 +4378,7 @@ public: // for now...
   BX_SMF void VMexit_CR8_Write(bxInstruction_c *i) BX_CPP_AttrRegparmN(1);
   BX_SMF void VMexit_DR_Access(bxInstruction_c *i, unsigned read) BX_CPP_AttrRegparmN(2);
 #if BX_SUPPORT_VMX >= 2
-  BX_SMF void vmfunc_eptp_switching(bxInstruction_c *i) BX_CPP_AttrRegparmN(1);
+  BX_SMF void vmfunc_eptp_switching(void);
 #endif
 #endif
diff --git a/bochs/cpu/crregs.cc b/bochs/cpu/crregs.cc
index 6efad7f68..b7afe172b 100644
--- a/bochs/cpu/crregs.cc
+++ b/bochs/cpu/crregs.cc
@@ -1389,7 +1389,7 @@ BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::CLTS(bxInstruction_c *i)
 #if BX_SUPPORT_VMX
   if (BX_CPU_THIS_PTR in_vmx_guest) {
-    if(VMexit_CLTS(i)) {
+    if(VMexit_CLTS()) {
       BX_NEXT_TRACE(i);
     }
   }
diff --git a/bochs/cpu/event.cc b/bochs/cpu/event.cc
index be73253d2..b8787e0b1 100644
--- a/bochs/cpu/event.cc
+++ b/bochs/cpu/event.cc
@@ -210,7 +210,7 @@ bx_bool BX_CPU_C::handleAsyncEvent(void)
 #if BX_SUPPORT_VMX
     if (BX_CPU_THIS_PTR in_vmx_guest) {
       BX_ERROR(("VMEXIT: INIT pin asserted"));
-      VMexit(0, VMX_VMEXIT_INIT, 0);
+      VMexit(VMX_VMEXIT_INIT, 0);
     }
 #endif
     // reset will clear pending INIT
@@ -266,7 +266,7 @@ bx_bool BX_CPU_C::handleAsyncEvent(void)
   {
     // NMI-window exiting
     BX_ERROR(("VMEXIT: NMI window exiting"));
-    VMexit(0, VMX_VMEXIT_NMI_WINDOW, 0);
+    VMexit(VMX_VMEXIT_NMI_WINDOW, 0);
   }
 #endif
   else if (BX_CPU_THIS_PTR pending_NMI && ! BX_CPU_THIS_PTR disable_NMI) {
@@ -279,7 +279,7 @@ bx_bool BX_CPU_C::handleAsyncEvent(void)
     BX_CPU_THIS_PTR disable_NMI = 1;
     BX_CPU_THIS_PTR EXT = 1; /* external event */
 #if BX_SUPPORT_VMX
-    VMexit_Event(0, BX_NMI, 2, 0, 0);
+    VMexit_Event(BX_NMI, 2, 0, 0);
 #endif
     BX_INSTR_HWINTERRUPT(BX_CPU_ID, 2, BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value, RIP);
     interrupt(2, BX_NMI, 0, 0);
@@ -288,7 +288,7 @@ bx_bool BX_CPU_C::handleAsyncEvent(void)
   else if (BX_CPU_THIS_PTR vmx_interrupt_window && BX_CPU_THIS_PTR get_IF()) {
     // interrupt-window exiting
     BX_DEBUG(("VMEXIT: interrupt window exiting"));
-    VMexit(0, VMX_VMEXIT_INTERRUPT_WINDOW, 0);
+    VMexit(VMX_VMEXIT_INTERRUPT_WINDOW, 0);
   }
 #endif
   else if (BX_CPU_INTR && BX_DBG_ASYNC_INTR &&
diff --git a/bochs/cpu/exception.cc b/bochs/cpu/exception.cc
index 2383871fb..896edb41d 100644
--- a/bochs/cpu/exception.cc
+++ b/bochs/cpu/exception.cc
@@ -851,7 +851,7 @@ void BX_CPU_C::exception(unsigned vector, Bit16u error_code)
   }

 #if BX_SUPPORT_VMX
-  VMexit_Event(0, BX_HARDWARE_EXCEPTION, vector, error_code, push_error);
+  VMexit_Event(BX_HARDWARE_EXCEPTION, vector, error_code, push_error);
 #endif

 #if BX_SUPPORT_SVM
diff --git a/bochs/cpu/msr.cc b/bochs/cpu/msr.cc
index 6efb1edca..f498d3154 100644
--- a/bochs/cpu/msr.cc
+++ b/bochs/cpu/msr.cc
@@ -365,7 +365,7 @@ BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::RDMSR(bxInstruction_c *i)
 #if BX_SUPPORT_VMX
   if (BX_CPU_THIS_PTR in_vmx_guest)
-    VMexit_MSR(i, VMX_VMEXIT_RDMSR, index);
+    VMexit_MSR(VMX_VMEXIT_RDMSR, index);
 #endif

 #if BX_SUPPORT_VMX >= 2
@@ -867,7 +867,7 @@ BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::WRMSR(bxInstruction_c *i)
 #if BX_SUPPORT_VMX
   if (BX_CPU_THIS_PTR in_vmx_guest)
-    VMexit_MSR(i, VMX_VMEXIT_WRMSR, index);
+    VMexit_MSR(VMX_VMEXIT_WRMSR, index);
 #endif

 #if BX_SUPPORT_VMX >= 2
diff --git a/bochs/cpu/paging.cc b/bochs/cpu/paging.cc
index a7533a16e..46af68f84 100644
--- a/bochs/cpu/paging.cc
+++ b/bochs/cpu/paging.cc
@@ -492,7 +492,7 @@ BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::INVLPG(bxInstruction_c* i)
   if (BX_CPU_THIS_PTR in_vmx_guest) {
     if (VMEXIT(VMX_VM_EXEC_CTRL2_INVLPG_VMEXIT)) {
       BX_ERROR(("VMEXIT: INVLPG 0x" FMT_ADDRX, laddr));
-      VMexit(i, VMX_VMEXIT_INVLPG, laddr);
+      VMexit(VMX_VMEXIT_INVLPG, laddr);
     }
   }
 #endif
@@ -540,7 +540,7 @@ void BX_CPU_C::page_fault(unsigned fault, bx_address laddr, unsigned user, unsig
 #endif

 #if BX_SUPPORT_VMX
-  VMexit_Event(0, BX_HARDWARE_EXCEPTION, BX_PF_EXCEPTION, error_code, 1, laddr); // before the CR2 was modified
+  VMexit_Event(BX_HARDWARE_EXCEPTION, BX_PF_EXCEPTION, error_code, 1, laddr); // before the CR2 was modified
 #endif

   BX_CPU_THIS_PTR cr2 = laddr;
@@ -1569,7 +1569,7 @@ bx_phy_address BX_CPU_C::translate_guest_physical(bx_phy_address guest_paddr, bx
     VMwrite64(VMCS_64BIT_GUEST_PHYSICAL_ADDR, guest_paddr);

     if (vmexit_reason == VMX_VMEXIT_EPT_MISCONFIGURATION) {
-      VMexit(0, VMX_VMEXIT_EPT_MISCONFIGURATION, 0);
+      VMexit(VMX_VMEXIT_EPT_MISCONFIGURATION, 0);
     }
     else {
       if (guest_laddr_valid) {
@@ -1577,7 +1577,7 @@ bx_phy_address BX_CPU_C::translate_guest_physical(bx_phy_address guest_paddr, bx
         vmexit_qualification |= 0x80;
         if (! is_page_walk) vmexit_qualification |= 0x100;
       }
-      VMexit(0, VMX_VMEXIT_EPT_VIOLATION, vmexit_qualification | (combined_access << 3));
+      VMexit(VMX_VMEXIT_EPT_VIOLATION, vmexit_qualification | (combined_access << 3));
     }
   }
diff --git a/bochs/cpu/proc_ctrl.cc b/bochs/cpu/proc_ctrl.cc
index 635fb8f37..bdf620cc5 100644
--- a/bochs/cpu/proc_ctrl.cc
+++ b/bochs/cpu/proc_ctrl.cc
@@ -45,7 +45,7 @@ BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::PAUSE(bxInstruction_c *i)
 {
 #if BX_SUPPORT_VMX
   if (BX_CPU_THIS_PTR in_vmx_guest)
-    VMexit_PAUSE(i);
+    VMexit_PAUSE();
 #endif

 #if BX_SUPPORT_SVM
@@ -74,7 +74,7 @@ BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::CPUID(bxInstruction_c *i)
 #if BX_SUPPORT_VMX
   if (BX_CPU_THIS_PTR in_vmx_guest) {
     BX_DEBUG(("VMEXIT: CPUID in VMX non-root operation"));
-    VMexit(i, VMX_VMEXIT_CPUID, 0);
+    VMexit(VMX_VMEXIT_CPUID, 0);
   }
 #endif

@@ -152,7 +152,7 @@ BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::HLT(bxInstruction_c *i)
   if (BX_CPU_THIS_PTR in_vmx_guest) {
     if (VMEXIT(VMX_VM_EXEC_CTRL2_HLT_VMEXIT)) {
       BX_ERROR(("VMEXIT: HLT"));
-      VMexit(i, VMX_VMEXIT_HLT, 0);
+      VMexit(VMX_VMEXIT_HLT, 0);
     }
   }
 #endif
@@ -201,7 +201,7 @@ BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::INVD(bxInstruction_c *i)
 #if BX_SUPPORT_VMX
   if (BX_CPU_THIS_PTR in_vmx_guest) {
     BX_ERROR(("VMEXIT: INVD in VMX non-root operation"));
-    VMexit(i, VMX_VMEXIT_INVD, 0);
+    VMexit(VMX_VMEXIT_INVD, 0);
   }
 #endif

@@ -234,7 +234,7 @@ BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::WBINVD(bxInstruction_c *i)
   if (BX_CPU_THIS_PTR in_vmx_guest) {
     if (SECONDARY_VMEXEC_CONTROL(VMX_VM_EXEC_CTRL3_WBINVD_VMEXIT)) {
       BX_ERROR(("VMEXIT: WBINVD in VMX non-root operation"));
-      VMexit(i, VMX_VMEXIT_WBINVD, 0);
+      VMexit(VMX_VMEXIT_WBINVD, 0);
     }
   }
 #endif
@@ -476,7 +476,7 @@ BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::RDPMC(bxInstruction_c *i)
   if (BX_CPU_THIS_PTR in_vmx_guest) {
     if (VMEXIT(VMX_VM_EXEC_CTRL2_RDPMC_VMEXIT)) {
       BX_DEBUG(("VMEXIT: RDPMC"));
-      VMexit(i, VMX_VMEXIT_RDPMC, 0);
+      VMexit(VMX_VMEXIT_RDPMC, 0);
     }
   }
 #endif
@@ -551,7 +551,7 @@ BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::RDTSC(bxInstruction_c *i)
   if (BX_CPU_THIS_PTR in_vmx_guest) {
     if (VMEXIT(VMX_VM_EXEC_CTRL2_RDTSC_VMEXIT)) {
       BX_DEBUG(("VMEXIT: RDTSC"));
-      VMexit(i, VMX_VMEXIT_RDTSC, 0);
+      VMexit(VMX_VMEXIT_RDTSC, 0);
     }
   }
 #endif
@@ -596,7 +596,7 @@ BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::RDTSCP(bxInstruction_c *i)
   if (BX_CPU_THIS_PTR in_vmx_guest) {
     if (VMEXIT(VMX_VM_EXEC_CTRL2_RDTSC_VMEXIT)) {
       BX_DEBUG(("VMEXIT: RDTSCP"));
-      VMexit(i, VMX_VMEXIT_RDTSCP, 0);
+      VMexit(VMX_VMEXIT_RDTSCP, 0);
     }
   }
 #endif
@@ -660,7 +660,7 @@ BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::MONITOR(bxInstruction_c *i)
   if (BX_CPU_THIS_PTR in_vmx_guest) {
     if (VMEXIT(VMX_VM_EXEC_CTRL2_MONITOR_VMEXIT)) {
       BX_DEBUG(("VMEXIT: MONITOR"));
-      VMexit(i, VMX_VMEXIT_MONITOR, 0);
+      VMexit(VMX_VMEXIT_MONITOR, 0);
     }
   }
 #endif
@@ -736,7 +736,7 @@ BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::MWAIT(bxInstruction_c *i)
   if (BX_CPU_THIS_PTR in_vmx_guest) {
     if (VMEXIT(VMX_VM_EXEC_CTRL2_MWAIT_VMEXIT)) {
       BX_DEBUG(("VMEXIT: MWAIT"));
-      VMexit(i, VMX_VMEXIT_MWAIT, BX_CPU_THIS_PTR monitor.armed);
+      VMexit(VMX_VMEXIT_MWAIT, BX_CPU_THIS_PTR monitor.armed);
     }
   }
 #endif
diff --git a/bochs/cpu/smm.cc b/bochs/cpu/smm.cc
index 23cec039f..5d0788a67 100644
--- a/bochs/cpu/smm.cc
+++ b/bochs/cpu/smm.cc
@@ -72,7 +72,7 @@ BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::RSM(bxInstruction_c *i)
   if (BX_CPU_THIS_PTR in_vmx) {
     if (BX_CPU_THIS_PTR in_vmx_guest) {
       BX_ERROR(("VMEXIT: RSM in VMX non-root operation"));
-      VMexit(i, VMX_VMEXIT_RSM, 0);
+      VMexit(VMX_VMEXIT_RSM, 0);
     }
     else {
       BX_ERROR(("RSM in VMX root operation !"));
@@ -530,8 +530,8 @@ bx_bool BX_CPU_C::smram_restore_state(const Bit32u *saved_state)
     BX_CPU_THIS_PTR in_vmx = 1;
     BX_CPU_THIS_PTR in_vmx_guest = BX_CPU_THIS_PTR in_smm_vmx_guest;
     BX_INFO(("SMM Restore: enable VMX %s mode", BX_CPU_THIS_PTR in_vmx_guest ? "guest" : "host"));
-    temp_cr4 |= BX_CR4_VMXE_MASK; /* set VMXE */
-    temp_cr0 |= (1<<31) /* PG */ | (1 << 5) /* NE */ | 0x1 /* PE */;
+    temp_cr4 |= BX_CR4_VMXE_MASK;
+    temp_cr0 |= BX_CR0_PG_MASK | BX_CR0_NE_MASK | BX_CR0_PE_MASK;
     // block and disable A20M;
   }
 #endif
diff --git a/bochs/cpu/soft_int.cc b/bochs/cpu/soft_int.cc
index 7acca588a..41231dc4e 100644
--- a/bochs/cpu/soft_int.cc
+++ b/bochs/cpu/soft_int.cc
@@ -63,7 +63,7 @@ BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::BOUND_GdMa(bxInstruction_c *i)
 BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::INT1(bxInstruction_c *i)
 {
 #if BX_SUPPORT_VMX
-  VMexit_Event(i, BX_PRIVILEGED_SOFTWARE_INTERRUPT, 1, 0, 0);
+  VMexit_Event(BX_PRIVILEGED_SOFTWARE_INTERRUPT, 1, 0, 0);
 #endif

 #if BX_SUPPORT_SVM
@@ -95,7 +95,7 @@ BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::INT3(bxInstruction_c *i)
   // INT 3 is not IOPL sensitive

 #if BX_SUPPORT_VMX
-  VMexit_Event(i, BX_SOFTWARE_EXCEPTION, 3, 0, 0);
+  VMexit_Event(BX_SOFTWARE_EXCEPTION, 3, 0, 0);
 #endif

 #if BX_SUPPORT_SVM
@@ -122,7 +122,7 @@ BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::INT_Ib(bxInstruction_c *i)
   Bit8u vector = i->Ib();

 #if BX_SUPPORT_VMX
-  VMexit_Event(i, BX_SOFTWARE_INTERRUPT, vector, 0, 0);
+  VMexit_Event(BX_SOFTWARE_INTERRUPT, vector, 0, 0);
 #endif

 #if BX_SUPPORT_SVM
@@ -155,7 +155,7 @@ BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::INTO(bxInstruction_c *i)
   if (get_OF()) {

 #if BX_SUPPORT_VMX
-    VMexit_Event(i, BX_SOFTWARE_EXCEPTION, 4, 0, 0);
+    VMexit_Event(BX_SOFTWARE_EXCEPTION, 4, 0, 0);
 #endif

 #if BX_SUPPORT_SVM
diff --git a/bochs/cpu/tasking.cc b/bochs/cpu/tasking.cc
index 1ccc92409..91fec6d89 100644
--- a/bochs/cpu/tasking.cc
+++ b/bochs/cpu/tasking.cc
@@ -169,7 +169,7 @@ void BX_CPU_C::task_switch(bxInstruction_c *i, bx_selector_t *tss_selector,

 #if BX_SUPPORT_VMX
   if (BX_CPU_THIS_PTR in_vmx_guest)
-    VMexit_TaskSwitch(i, tss_selector->value, source);
+    VMexit_TaskSwitch(tss_selector->value, source);
 #endif

   // Gather info about old TSS
diff --git a/bochs/cpu/vmexit.cc b/bochs/cpu/vmexit.cc
index c1be25dd6..fd50e4d8b 100644
--- a/bochs/cpu/vmexit.cc
+++ b/bochs/cpu/vmexit.cc
@@ -98,15 +98,6 @@ void BX_CPP_AttrRegparmN(2) BX_CPU_C::VMexit_Instruction(bxInstruction_c *i, Bit
   Bit32u instr_info = 0;

   switch(reason) {
-    case VMX_VMEXIT_VMCALL:
-    case VMX_VMEXIT_VMLAUNCH:
-    case VMX_VMEXIT_VMRESUME:
-    case VMX_VMEXIT_VMXOFF:
-#if BX_SUPPORT_VMX >= 2
-    case VMX_VMEXIT_VMFUNC:
-#endif
-      // do not have VMEXIT instruction info
-      break;
     case VMX_VMEXIT_VMREAD:
     case VMX_VMEXIT_VMWRITE:
     case VMX_VMEXIT_VMPTRLD:
@@ -137,16 +128,16 @@ void BX_CPP_AttrRegparmN(2) BX_CPU_C::VMexit_Instruction(bxInstruction_c *i, Bit
       BX_PANIC(("VMexit_Instruction reason %d", reason));
   }

-  VMexit(i, reason, qualification);
+  VMexit(reason, qualification);
 }

-void BX_CPP_AttrRegparmN(1) BX_CPU_C::VMexit_PAUSE(bxInstruction_c *i)
+void BX_CPU_C::VMexit_PAUSE(void)
 {
   BX_ASSERT(BX_CPU_THIS_PTR in_vmx_guest);

   if (VMEXIT(VMX_VM_EXEC_CTRL2_PAUSE_VMEXIT)) {
     BX_DEBUG(("VMEXIT: PAUSE"));
-    VMexit(i, VMX_VMEXIT_PAUSE, 0);
+    VMexit(VMX_VMEXIT_PAUSE, 0);
   }

 #if BX_SUPPORT_VMX >= 2
@@ -158,7 +149,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::VMexit_PAUSE(bxInstruction_c *i)
     }
     else {
       if ((currtime - vm->first_pause_time) > vm->pause_loop_exiting_window)
-        VMexit(i, VMX_VMEXIT_PAUSE, 0);
+        VMexit(VMX_VMEXIT_PAUSE, 0);
     }
     vm->last_pause_time = currtime;
   }
@@ -174,7 +165,7 @@ void BX_CPU_C::VMexit_ExtInterrupt(void)
     if (! (vm->vmexit_ctrls & VMX_VMEXIT_CTRL1_INTA_ON_VMEXIT)) {
       // interrupt wasn't acknowledged and still pending, interruption info is invalid
       VMwrite32(VMCS_32BIT_VMEXIT_INTERRUPTION_INFO, 0);
-      VMexit(0, VMX_VMEXIT_EXTERNAL_INTERRUPT, 0);
+      VMexit(VMX_VMEXIT_EXTERNAL_INTERRUPT, 0);
     }
   }
 }
@@ -186,12 +177,12 @@ void BX_CPU_C::VMexit_PreemptionTimerExpired(void)

   if (PIN_VMEXIT(VMX_VM_EXEC_CTRL1_VMX_PREEMPTION_TIMER_VMEXIT)) {
     BX_DEBUG(("VMEXIT: VMX Preemption Timer Expired"));
-    VMexit(0, VMX_VMEXIT_VMX_PREEMPTION_TIMER_EXPIRED, 0);
+    VMexit(VMX_VMEXIT_VMX_PREEMPTION_TIMER_EXPIRED, 0);
   }
 }
 #endif

-void BX_CPU_C::VMexit_Event(bxInstruction_c *i, unsigned type, unsigned vector, Bit16u errcode, bx_bool errcode_valid, Bit64u qualification)
+void BX_CPU_C::VMexit_Event(unsigned type, unsigned vector, Bit16u errcode, bx_bool errcode_valid, Bit64u qualification)
 {
   if (! BX_CPU_THIS_PTR in_vmx_guest) return;
@@ -244,10 +235,6 @@ void BX_CPU_C::VMexit_Event(bxInstruction_c *i, unsigned type, unsigned vector,
   // [31:31] | interruption info valid
   //

-  if (i) {
-    VMwrite32(VMCS_32BIT_VMEXIT_INSTRUCTION_LENGTH, i->ilen());
-  }
-
   if (! vmexit) {
     // record IDT vectoring information
     vm->idt_vector_error_code = errcode;
@@ -281,7 +268,7 @@ void BX_CPU_C::VMexit_Event(bxInstruction_c *i, unsigned type, unsigned vector,
   VMwrite32(VMCS_32BIT_VMEXIT_INTERRUPTION_INFO, interruption_info);
   VMwrite32(VMCS_32BIT_VMEXIT_INTERRUPTION_ERR_CODE, errcode);

-  VMexit(0, reason, qualification);
+  VMexit(reason, qualification);
 }

 void BX_CPU_C::VMexit_TripleFault(void)
@@ -294,16 +281,16 @@ void BX_CPU_C::VMexit_TripleFault(void)
   // in a triple fault exception (that causes VMEXIT directly)
   BX_CPU_THIS_PTR in_event = 0;

-  VMexit(0, VMX_VMEXIT_TRIPLE_FAULT, 0);
+  VMexit(VMX_VMEXIT_TRIPLE_FAULT, 0);
 }

-void BX_CPP_AttrRegparmN(3) BX_CPU_C::VMexit_TaskSwitch(bxInstruction_c *i, Bit16u tss_selector, unsigned source)
+void BX_CPP_AttrRegparmN(2) BX_CPU_C::VMexit_TaskSwitch(Bit16u tss_selector, unsigned source)
 {
   BX_ASSERT(BX_CPU_THIS_PTR in_vmx_guest);

   BX_ERROR(("VMEXIT: task switch"));

-  VMexit(i, VMX_VMEXIT_TASK_SWITCH, tss_selector | (source << 30));
+  VMexit(VMX_VMEXIT_TASK_SWITCH, tss_selector | (source << 30));
 }

 #define BX_VMX_LO_MSR_START 0x00000000
@@ -311,7 +298,7 @@ void BX_CPP_AttrRegparmN(3) BX_CPU_C::VMexit_TaskSwitch(bxInstruction_c *i, Bit1
 #define BX_VMX_HI_MSR_START 0xC0000000
 #define BX_VMX_HI_MSR_END   0xC0001FFF

-void BX_CPP_AttrRegparmN(3) BX_CPU_C::VMexit_MSR(bxInstruction_c *i, unsigned op, Bit32u msr)
+void BX_CPP_AttrRegparmN(2) BX_CPU_C::VMexit_MSR(unsigned op, Bit32u msr)
 {
   BX_ASSERT(BX_CPU_THIS_PTR in_vmx_guest);
@@ -347,7 +334,7 @@ void BX_CPP_AttrRegparmN(3) BX_CPU_C::VMexit_MSR(bxInstruction_c *i, unsigned op

   if (vmexit) {
     BX_DEBUG(("VMEXIT: %sMSR 0x%08x", (op == VMX_VMEXIT_RDMSR) ? "RD" : "WR", msr));
-    VMexit(i, op, 0);
+    VMexit(op, 0);
   }
 }
@@ -469,7 +456,7 @@ void BX_CPP_AttrRegparmN(3) BX_CPU_C::VMexit_IO(bxInstruction_c *i, unsigned por
       VMwrite32(VMCS_32BIT_VMEXIT_INSTRUCTION_INFO, instruction_info);
     }

-    VMexit(i, VMX_VMEXIT_IO_INSTRUCTION, qualification | (len-1) | (port << 16));
+    VMexit(VMX_VMEXIT_IO_INSTRUCTION, qualification | (len-1) | (port << 16));
   }
 }
@@ -487,7 +474,7 @@ void BX_CPP_AttrRegparmN(3) BX_CPU_C::VMexit_IO(bxInstruction_c *i, unsigned por
 // [63:32] | reserved
 //

-bx_bool BX_CPP_AttrRegparmN(1) BX_CPU_C::VMexit_CLTS(bxInstruction_c *i)
+bx_bool BX_CPU_C::VMexit_CLTS(void)
 {
   BX_ASSERT(BX_CPU_THIS_PTR in_vmx_guest);
@@ -500,7 +487,7 @@ bx_bool BX_CPP_AttrRegparmN(1) BX_CPU_C::VMexit_CLTS(bxInstruction_c *i)
     // all rest of the fields cleared to zero
     Bit64u qualification = VMX_VMEXIT_CR_ACCESS_CLTS << 4;

-    VMexit(i, VMX_VMEXIT_CR_ACCESS, qualification);
+    VMexit(VMX_VMEXIT_CR_ACCESS, qualification);
   }

   if ((vm->vm_cr0_mask & 0x8) != 0 && (vm->vm_cr0_read_shadow & 0x8) == 0)
@@ -533,7 +520,7 @@ Bit32u BX_CPP_AttrRegparmN(2) BX_CPU_C::VMexit_LMSW(bxInstruction_c *i, Bit32u m
       VMwrite_natural(VMCS_GUEST_LINEAR_ADDR, get_laddr(i->seg(), RMAddr(i)));
     }

-    VMexit(i, VMX_VMEXIT_CR_ACCESS, qualification);
+    VMexit(VMX_VMEXIT_CR_ACCESS, qualification);
   }

   // keep untouched all the bits set in CR0 mask
@@ -550,7 +537,7 @@ bx_address BX_CPP_AttrRegparmN(2) BX_CPU_C::VMexit_CR0_Write(bxInstruction_c *i,
   {
     BX_DEBUG(("VMEXIT: CR0 write"));
     Bit64u qualification = i->rm() << 8;
-    VMexit(i, VMX_VMEXIT_CR_ACCESS, qualification);
+    VMexit(VMX_VMEXIT_CR_ACCESS, qualification);
   }

   // keep untouched all the bits set in CR0 mask
@@ -567,7 +554,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::VMexit_CR3_Read(bxInstruction_c *i)
     Bit64u qualification = 3 | (VMX_VMEXIT_CR_ACCESS_CR_READ << 4);
     qualification |= (i->rm() << 8);

-    VMexit(i, VMX_VMEXIT_CR_ACCESS, qualification);
+    VMexit(VMX_VMEXIT_CR_ACCESS, qualification);
   }
 }
@@ -584,7 +571,7 @@ void BX_CPP_AttrRegparmN(2) BX_CPU_C::VMexit_CR3_Write(bxInstruction_c *i, bx_ad
     BX_DEBUG(("VMEXIT: CR3 write"));
     Bit64u qualification = 3 | (i->rm() << 8);

-    VMexit(i, VMX_VMEXIT_CR_ACCESS, qualification);
+    VMexit(VMX_VMEXIT_CR_ACCESS, qualification);
   }
 }
@@ -598,7 +585,7 @@ bx_address BX_CPP_AttrRegparmN(2) BX_CPU_C::VMexit_CR4_Write(bxInstruction_c *i,
   {
     BX_DEBUG(("VMEXIT: CR4 write"));
     Bit64u qualification = 4 | (i->rm() << 8);
-    VMexit(i, VMX_VMEXIT_CR_ACCESS, qualification);
+    VMexit(VMX_VMEXIT_CR_ACCESS, qualification);
   }

   // keep untouched all the bits set in CR4 mask
@@ -615,7 +602,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::VMexit_CR8_Read(bxInstruction_c *i)
     Bit64u qualification = 8 | (VMX_VMEXIT_CR_ACCESS_CR_READ << 4);
     qualification |= (i->rm() << 8);

-    VMexit(i, VMX_VMEXIT_CR_ACCESS, qualification);
+    VMexit(VMX_VMEXIT_CR_ACCESS, qualification);
   }
 }
@@ -626,7 +613,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::VMexit_CR8_Write(bxInstruction_c *i)
   if (VMEXIT(VMX_VM_EXEC_CTRL2_CR8_WRITE_VMEXIT)) {
     BX_DEBUG(("VMEXIT: CR8 write"));
     Bit64u qualification = 8 | (i->rm() << 8);
-    VMexit(i, VMX_VMEXIT_CR_ACCESS, qualification);
+    VMexit(VMX_VMEXIT_CR_ACCESS, qualification);
   }
 }
@@ -653,7 +640,7 @@ void BX_CPP_AttrRegparmN(2) BX_CPU_C::VMexit_DR_Access(bxInstruction_c *i, unsig
     if (read)
       qualification |= (1 << 4);

-    VMexit(i, VMX_VMEXIT_DR_ACCESS, qualification);
+    VMexit(VMX_VMEXIT_DR_ACCESS, qualification);
   }
 }
@@ -680,7 +667,7 @@ void BX_CPU_C::VMX_Write_VTPR(Bit8u vtpr)
   if (tpr_shadow < vm->vm_tpr_threshold) {
     // commit current instruction to produce trap-like VMexit
     BX_CPU_THIS_PTR prev_rip = RIP; // commit new RIP
-    VMexit(0, VMX_VMEXIT_TPR_THRESHOLD, 0);
+    VMexit(VMX_VMEXIT_TPR_THRESHOLD, 0);
   }
 }
@@ -720,7 +707,7 @@ void BX_CPU_C::VMX_Virtual_Apic_Read(bx_phy_address paddr, unsigned len, void *d
   Bit32u qualification = offset |
     (BX_CPU_THIS_PTR in_event) ? VMX_APIC_ACCESS_DURING_EVENT_DELIVERY : VMX_APIC_READ_INSTRUCTION_EXECUTION;
-  VMexit(0, VMX_VMEXIT_APIC_ACCESS, qualification);
+  VMexit(VMX_VMEXIT_APIC_ACCESS, qualification);
 }

 // apic virtualization
@@ -738,7 +725,7 @@ void BX_CPU_C::VMX_Virtual_Apic_Write(bx_phy_address paddr, unsigned len, void *
   Bit32u qualification = offset |
     (BX_CPU_THIS_PTR in_event) ? VMX_APIC_ACCESS_DURING_EVENT_DELIVERY : VMX_APIC_WRITE_INSTRUCTION_EXECUTION;
-  VMexit(0, VMX_VMEXIT_APIC_ACCESS, qualification);
+  VMexit(VMX_VMEXIT_APIC_ACCESS, qualification);
 }
 #endif
diff --git a/bochs/cpu/vmfunc.cc b/bochs/cpu/vmfunc.cc
index 7fd18f702..98f222964 100644
--- a/bochs/cpu/vmfunc.cc
+++ b/bochs/cpu/vmfunc.cc
@@ -42,12 +42,12 @@ BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::VMFUNC(bxInstruction_c *i)
   if (0 == (vm->vmfunc_ctrls & (BX_CONST64(1)<= 2
-void BX_CPP_AttrRegparmN(1) BX_CPU_C::vmfunc_eptp_switching(bxInstruction_c *i)
+void BX_CPU_C::vmfunc_eptp_switching(void)
 {
   Bit32u eptp_list_entry = ECX;
   if (eptp_list_entry >= 512) {
     BX_ERROR(("vmfunc_eptp_switching: invalid EPTP list entry %d", eptp_list_entry));
-    VMexit_Instruction(i, VMX_VMEXIT_VMFUNC);
+    VMexit(VMX_VMEXIT_VMFUNC, 0);
   }

   VMCS_CACHE *vm = &BX_CPU_THIS_PTR vmcs;
@@ -74,7 +74,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::vmfunc_eptp_switching(bxInstruction_c *i)
   access_read_physical(vm->eptp_list_address + 8 * ECX, 8, &temp_eptp);
   if (! is_eptptr_valid(temp_eptp)) {
     BX_ERROR(("vmfunc_eptp_switching: invalid EPTP value in EPTP entry %d", ECX));
-    VMexit_Instruction(i, VMX_VMEXIT_VMFUNC);
+    VMexit(VMX_VMEXIT_VMFUNC, 0);
   }

   vm->eptptr = temp_eptp;
diff --git a/bochs/cpu/vmx.cc b/bochs/cpu/vmx.cc
index 876d3a91b..c464dd91e 100644
--- a/bochs/cpu/vmx.cc
+++ b/bochs/cpu/vmx.cc
@@ -2077,7 +2077,7 @@ void BX_CPU_C::VMexitLoadHostState(void)
   BX_INSTR_TLB_CNTRL(BX_CPU_ID, BX_INSTR_CONTEXT_SWITCH, 0);
 }

-void BX_CPU_C::VMexit(bxInstruction_c *i, Bit32u reason, Bit64u qualification)
+void BX_CPU_C::VMexit(Bit32u reason, Bit64u qualification)
 {
   VMCS_CACHE *vm = &BX_CPU_THIS_PTR vmcs;

@@ -2086,11 +2086,6 @@ void BX_CPU_C::VMexit(bxInstruction_c *i, Bit32u reason, Bit64u qualification)
     BX_PANIC(("PANIC: VMEXIT not in VMX guest mode !"));
   }

-  // VMEXITs are FAULT-like: restore RIP/RSP to value before VMEXIT occurred
-  RIP = BX_CPU_THIS_PTR prev_rip;
-  if (BX_CPU_THIS_PTR speculative_rsp)
-    RSP = BX_CPU_THIS_PTR prev_rsp;
-
   //
   // STEP 0: Update VMEXIT reason
   //
@@ -2098,8 +2093,8 @@ void BX_CPU_C::VMexit(bxInstruction_c *i, Bit32u reason, Bit64u qualification)
   VMwrite32(VMCS_32BIT_VMEXIT_REASON, reason);
   VMwrite_natural(VMCS_VMEXIT_QUALIFICATION, qualification);

-  if (i != 0)
-    VMwrite32(VMCS_32BIT_VMEXIT_INSTRUCTION_LENGTH, i->ilen());
+  // clipping with 0xf not really necessary but keep it for safety
+  VMwrite32(VMCS_32BIT_VMEXIT_INSTRUCTION_LENGTH, (RIP-BX_CPU_THIS_PTR prev_rip) & 0xf);

   reason &= 0xffff; /* keep only basic VMEXIT reason */

@@ -2116,6 +2111,11 @@ void BX_CPU_C::VMexit(bxInstruction_c *i, Bit32u reason, Bit64u qualification)
     VMwrite32(VMCS_32BIT_IDT_VECTORING_INFO, 0);
   }

+  // VMEXITs are FAULT-like: restore RIP/RSP to value before VMEXIT occurred
+  RIP = BX_CPU_THIS_PTR prev_rip;
+  if (BX_CPU_THIS_PTR speculative_rsp)
+    RSP = BX_CPU_THIS_PTR prev_rsp;
+
   //
   // STEP 1: Saving Guest State to VMCS
   //
@@ -2244,7 +2244,7 @@ BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::VMXOFF(bxInstruction_c *i)
   if (BX_CPU_THIS_PTR in_vmx_guest) {
     BX_ERROR(("VMEXIT: VMXOFF in VMX non-root operation"));
-    VMexit_Instruction(i, VMX_VMEXIT_VMXOFF);
+    VMexit(VMX_VMEXIT_VMXOFF, 0);
   }

   if (CPL != 0) {
@@ -2280,7 +2280,7 @@ BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::VMCALL(bxInstruction_c *i)
   if (BX_CPU_THIS_PTR in_vmx_guest) {
     BX_ERROR(("VMEXIT: VMCALL in VMX non-root operation"));
-    VMexit_Instruction(i, VMX_VMEXIT_VMCALL);
+    VMexit(VMX_VMEXIT_VMCALL, 0);
   }

   if (BX_CPU_THIS_PTR get_VM() || BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_COMPAT)
@@ -2366,7 +2366,7 @@ BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::VMLAUNCH(bxInstruction_c *i)
   if (BX_CPU_THIS_PTR in_vmx_guest) {
     BX_ERROR(("VMEXIT: VMLAUNCH in VMX non-root operation"));
-    VMexit_Instruction(i, vmlaunch ? VMX_VMEXIT_VMLAUNCH : VMX_VMEXIT_VMRESUME);
+    VMexit(vmlaunch ? VMX_VMEXIT_VMLAUNCH : VMX_VMEXIT_VMRESUME, 0);
   }

   if (CPL != 0) {
@@ -2436,13 +2436,13 @@ BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::VMLAUNCH(bxInstruction_c *i)
   Bit32u state_load_error = VMenterLoadCheckGuestState(&qualification);
   if (state_load_error) {
     BX_ERROR(("VMEXIT: Guest State Checks Failed"));
-    VMexit(0, VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE | (1 << 31), qualification);
+    VMexit(VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE | (1 << 31), qualification);
   }

   Bit32u msr = LoadMSRs(BX_CPU_THIS_PTR vmcs.vmentry_msr_load_cnt, BX_CPU_THIS_PTR vmcs.vmentry_msr_load_addr);
   if (msr) {
     BX_ERROR(("VMEXIT: Error when loading guest MSR 0x%08x", msr));
-    VMexit(0, VMX_VMEXIT_VMENTRY_FAILURE_MSR | (1 << 31), msr);
+    VMexit(VMX_VMEXIT_VMENTRY_FAILURE_MSR | (1 << 31), msr);
   }

   ///////////////////////////////////////////////////////
@@ -3095,7 +3095,7 @@ BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::GETSEC(bxInstruction_c *i)
 #if BX_SUPPORT_VMX
   if (BX_CPU_THIS_PTR in_vmx_guest) {
     BX_ERROR(("VMEXIT: GETSEC in VMX non-root operation"));
-    VMexit(i, VMX_VMEXIT_GETSEC, 0);
+    VMexit(VMX_VMEXIT_GETSEC, 0);
   }
 #endif
diff --git a/bochs/cpu/xsave.cc b/bochs/cpu/xsave.cc
index f22b999d2..9fadfeba8 100644
--- a/bochs/cpu/xsave.cc
+++ b/bochs/cpu/xsave.cc
@@ -403,7 +403,7 @@ BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::XSETBV(bxInstruction_c *i)
 #if BX_SUPPORT_VMX
   if (BX_CPU_THIS_PTR in_vmx_guest) {
     BX_ERROR(("VMEXIT: XSETBV in VMX non-root operation"));
-    VMexit(i, VMX_VMEXIT_XSETBV, 0);
+    VMexit(VMX_VMEXIT_XSETBV, 0);
   }
 #endif