Introduce a new interface for handling CPU events, based on a vector of events rather than many unrelated variables. This is a very initial implementation that moves only a few events into the new interface; more will come soon.

This commit is contained in:
Stanislav Shwartsman 2012-09-25 09:35:38 +00:00
parent d29fb9a592
commit 40ba9c8d7b
12 changed files with 100 additions and 95 deletions

View File

@ -1173,8 +1173,7 @@ void bx_local_apic_c::deactivate_vmx_preemption_timer(void)
// Timer callback fired when the VMX preemption timer counts down to zero.
// NOTE(review): this is stripped-diff residue -- the two flag assignments
// below are the removed (old) lines and the signal_event() call is the
// added replacement; only one of the two forms exists in the real tree.
void bx_local_apic_c::vmx_preemption_timer_expired(void *this_ptr)
{
// The timer framework hands back the lapic object through an opaque pointer.
bx_local_apic_c *class_ptr = (bx_local_apic_c *) this_ptr;
class_ptr->cpu->pending_vmx_timer_expired = 1; // old interface: dedicated flag
class_ptr->cpu->async_event = 1; // old interface: generic async wakeup
class_ptr->cpu->signal_event(BX_EVENT_VMX_PREEMPTION_TIMER_EXPIRED); // new event interface
// One-shot timer: disarm it once it has fired.
class_ptr->deactivate_vmx_preemption_timer();
}
#endif

View File

@ -589,8 +589,7 @@ void BX_CPU_C::prefetch(void)
#if BX_X86_DEBUGGER
if (hwbreakpoint_check(laddr, BX_HWDebugInstruction, BX_HWDebugInstruction)) {
BX_CPU_THIS_PTR async_event = 1;
BX_CPU_THIS_PTR codebp = 1;
signal_event(BX_EVENT_CODE_BREAKPOINT_ASSIST);
if (! interrupts_inhibited(BX_INHIBIT_DEBUG)) {
// The next instruction could already hit a code breakpoint but
// async_event won't take effect immediately.
@ -609,7 +608,7 @@ void BX_CPU_C::prefetch(void)
}
}
else {
BX_CPU_THIS_PTR codebp = 0;
clear_event(BX_EVENT_CODE_BREAKPOINT_ASSIST);
}
#endif

View File

@ -611,15 +611,15 @@ BOCHSAPI extern BX_CPU_C bx_cpu;
}
// invalidate prefetch queue and call prefetch() when RF is set
#define IMPLEMENT_EFLAG_SET_ACCESSOR_RF(name,bitnum) \
BX_CPP_INLINE void BX_CPU_C::assert_##name() { \
#define IMPLEMENT_EFLAG_SET_ACCESSOR_RF(bitnum) \
BX_CPP_INLINE void BX_CPU_C::assert_RF() { \
invalidate_prefetch_q(); \
BX_CPU_THIS_PTR eflags |= (1<<bitnum); \
} \
BX_CPP_INLINE void BX_CPU_C::clear_##name() { \
BX_CPP_INLINE void BX_CPU_C::clear_RF() { \
BX_CPU_THIS_PTR eflags &= ~(1<<bitnum); \
} \
BX_CPP_INLINE void BX_CPU_C::set_##name(bx_bool val) { \
BX_CPP_INLINE void BX_CPU_C::set_RF(bx_bool val) { \
if (val) invalidate_prefetch_q(); \
BX_CPU_THIS_PTR eflags = \
(BX_CPU_THIS_PTR eflags&~(1<<bitnum))|((val)<<bitnum); \
@ -1049,10 +1049,6 @@ public: // for now...
bx_bool in_vmx_guest;
bx_bool in_smm_vmx; // save in_vmx and in_vmx_guest flags when in SMM mode
bx_bool in_smm_vmx_guest;
bx_bool vmx_interrupt_window;
#if BX_SUPPORT_VMX >= 2
bx_bool pending_vmx_timer_expired;
#endif
Bit64u vmcsptr;
bx_hostpageaddr_t vmcshostptr;
Bit64u vmxonptr;
@ -1094,24 +1090,54 @@ public: // for now...
#define BX_ACTIVITY_STATE_MWAIT_IF (5)
unsigned activity_state;
#define BX_EVENT_NMI (1<<0)
#define BX_EVENT_SMI (1<<1)
#define BX_EVENT_INIT (1<<2)
#define BX_EVENT_CODE_BREAKPOINT_ASSIST (1<<3)
#define BX_EVENT_VMX_PREEMPTION_TIMER_EXPIRED (1<<4)
#define BX_EVENT_VMX_INTERRUPT_WINDOW_EXITING (1<<5)
#define BX_EVENT_VMX_NMI_WINDOW_EXITING (1<<6)
// later the event list will grow rapidly
Bit32u pending_event;
Bit32u event_mask;
Bit32u async_event;
#define BX_ASYNC_EVENT_STOP_TRACE (0x80000000)
BX_SMF BX_CPP_INLINE void signal_event(Bit32u event) {
BX_CPU_THIS_PTR pending_event |= event;
BX_CPU_THIS_PTR async_event = 1;
}
BX_SMF BX_CPP_INLINE void clear_event(Bit32u event) {
BX_CPU_THIS_PTR pending_event &= ~event;
}
BX_SMF BX_CPP_INLINE void mask_event(Bit32u event) {
BX_CPU_THIS_PTR event_mask |= event;
}
BX_SMF BX_CPP_INLINE void unmask_event(Bit32u event) {
BX_CPU_THIS_PTR event_mask &= ~event;
}
BX_SMF BX_CPP_INLINE bx_bool is_masked_event(Bit32u event) {
return (BX_CPU_THIS_PTR event_mask & event) != 0;
}
BX_SMF BX_CPP_INLINE bx_bool is_pending(Bit32u event) {
return (BX_CPU_THIS_PTR pending_event & event) != 0;
}
BX_SMF BX_CPP_INLINE bx_bool is_unmasked_event_pending(Bit32u event) {
return (BX_CPU_THIS_PTR pending_event & ~BX_CPU_THIS_PTR event_mask & event) != 0;
}
#define BX_ASYNC_EVENT_STOP_TRACE (1<<31)
#if BX_X86_DEBUGGER
bx_bool in_repeat;
bx_bool codebp;
#endif
bx_bool in_smm;
unsigned cpu_mode;
bx_bool user_pl;
bx_bool INTR;
bx_bool pending_SMI;
bx_bool pending_NMI;
bx_bool pending_INIT;
bx_bool disable_SMI;
bx_bool disable_NMI;
bx_bool disable_INIT;
#if BX_CPU_LEVEL >= 5
bx_bool ignore_bad_msrs;
#endif
@ -4756,7 +4782,7 @@ IMPLEMENT_EFLAG_SET_ACCESSOR_AC ( 18)
IMPLEMENT_EFLAG_SET_ACCESSOR (AC, 18)
#endif
IMPLEMENT_EFLAG_SET_ACCESSOR_VM ( 17)
IMPLEMENT_EFLAG_SET_ACCESSOR_RF (RF, 16)
IMPLEMENT_EFLAG_SET_ACCESSOR_RF ( 16)
IMPLEMENT_EFLAG_SET_ACCESSOR (NT, 14)
IMPLEMENT_EFLAG_SET_ACCESSOR (DF, 10)
IMPLEMENT_EFLAG_SET_ACCESSOR_IF_TF(IF, 9)

View File

@ -547,7 +547,7 @@ BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::IRET16(bxInstruction_c *i)
#if BX_SUPPORT_VMX
if (!BX_CPU_THIS_PTR in_vmx_guest || !VMEXIT(VMX_VM_EXEC_CTRL1_NMI_VMEXIT))
#endif
BX_CPU_THIS_PTR disable_NMI = 0;
unmask_event(BX_EVENT_NMI);
#if BX_DEBUGGER
BX_CPU_THIS_PTR show_flag |= Flag_iret;

View File

@ -590,7 +590,7 @@ BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::IRET32(bxInstruction_c *i)
#if BX_SUPPORT_VMX
if (!BX_CPU_THIS_PTR in_vmx_guest || !VMEXIT(VMX_VM_EXEC_CTRL1_NMI_VMEXIT))
#endif
BX_CPU_THIS_PTR disable_NMI = 0;
unmask_event(BX_EVENT_NMI);
#if BX_DEBUGGER
BX_CPU_THIS_PTR show_flag |= Flag_iret;

View File

@ -429,7 +429,7 @@ BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::IRET64(bxInstruction_c *i)
#if BX_SUPPORT_VMX
if (!BX_CPU_THIS_PTR in_vmx_guest || !VMEXIT(VMX_VM_EXEC_CTRL1_NMI_VMEXIT))
#endif
BX_CPU_THIS_PTR disable_NMI = 0;
unmask_event(BX_EVENT_NMI);
#if BX_DEBUGGER
BX_CPU_THIS_PTR show_flag |= Flag_iret;

View File

@ -34,10 +34,7 @@ bx_bool BX_CPU_C::handleWaitForEvent(void)
while (1)
{
if ((BX_CPU_INTR && (BX_CPU_THIS_PTR get_IF() || BX_CPU_THIS_PTR activity_state == BX_ACTIVITY_STATE_MWAIT_IF)) ||
#if BX_SUPPORT_VMX >= 2
BX_CPU_THIS_PTR pending_vmx_timer_expired ||
#endif
BX_CPU_THIS_PTR pending_NMI || BX_CPU_THIS_PTR pending_SMI || BX_CPU_THIS_PTR pending_INIT)
is_pending(BX_EVENT_NMI | BX_EVENT_SMI | BX_EVENT_INIT | BX_EVENT_VMX_PREEMPTION_TIMER_EXPIRED))
{
// interrupt ends the HALT condition
#if BX_SUPPORT_MONITOR_MWAIT
@ -194,14 +191,14 @@ bx_bool BX_CPU_C::handleAsyncEvent(void)
// STOPCLK
// SMI
// INIT
if (BX_CPU_THIS_PTR pending_SMI && ! BX_CPU_THIS_PTR smm_mode() && SVM_GIF)
if (is_pending(BX_EVENT_SMI) && ! BX_CPU_THIS_PTR smm_mode() && SVM_GIF)
{
// clear SMI pending flag and disable NMI when SMM was accepted
BX_CPU_THIS_PTR pending_SMI = 0;
clear_event(BX_EVENT_SMI);
enter_system_management_mode();
}
if (BX_CPU_THIS_PTR pending_INIT && ! BX_CPU_THIS_PTR disable_INIT && SVM_GIF) {
if (is_unmasked_event_pending(BX_EVENT_INIT) && SVM_GIF) {
#if BX_SUPPORT_SVM
if (BX_CPU_THIS_PTR in_svm_guest) {
if (SVM_INTERCEPT(SVM_INTERCEPT0_INIT)) Svm_Vmexit(SVM_VMEXIT_INIT);
@ -255,13 +252,13 @@ bx_bool BX_CPU_C::handleAsyncEvent(void)
// boundary because of certain instructions like STI.
}
#if BX_SUPPORT_VMX >= 2
else if (BX_CPU_THIS_PTR in_vmx_guest && BX_CPU_THIS_PTR pending_vmx_timer_expired) {
BX_CPU_THIS_PTR pending_vmx_timer_expired = 0;
else if (BX_CPU_THIS_PTR in_vmx_guest && is_pending(BX_EVENT_VMX_PREEMPTION_TIMER_EXPIRED)) {
clear_event(BX_EVENT_VMX_PREEMPTION_TIMER_EXPIRED);
VMexit_PreemptionTimerExpired();
}
#endif
#if BX_SUPPORT_VMX
else if (! BX_CPU_THIS_PTR disable_NMI && BX_CPU_THIS_PTR in_vmx_guest &&
else if (! is_masked_event(BX_EVENT_NMI) && BX_CPU_THIS_PTR in_vmx_guest &&
VMEXIT(VMX_VM_EXEC_CTRL2_NMI_WINDOW_VMEXIT))
{
// NMI-window exiting
@ -269,14 +266,14 @@ bx_bool BX_CPU_C::handleAsyncEvent(void)
VMexit(VMX_VMEXIT_NMI_WINDOW, 0);
}
#endif
else if (BX_CPU_THIS_PTR pending_NMI && ! BX_CPU_THIS_PTR disable_NMI) {
else if (is_unmasked_event_pending(BX_EVENT_NMI)) {
#if BX_SUPPORT_SVM
if (BX_CPU_THIS_PTR in_svm_guest) {
if (SVM_INTERCEPT(SVM_INTERCEPT0_NMI)) Svm_Vmexit(SVM_VMEXIT_NMI);
}
#endif
BX_CPU_THIS_PTR pending_NMI = 0;
BX_CPU_THIS_PTR disable_NMI = 1;
clear_event(BX_EVENT_NMI);
mask_event(BX_EVENT_NMI);
BX_CPU_THIS_PTR EXT = 1; /* external event */
#if BX_SUPPORT_VMX
VMexit_Event(BX_NMI, 2, 0, 0);
@ -285,7 +282,7 @@ bx_bool BX_CPU_C::handleAsyncEvent(void)
interrupt(2, BX_NMI, 0, 0);
}
#if BX_SUPPORT_VMX
else if (BX_CPU_THIS_PTR vmx_interrupt_window && BX_CPU_THIS_PTR get_IF()) {
else if (is_pending(BX_EVENT_VMX_INTERRUPT_WINDOW_EXITING) && BX_CPU_THIS_PTR get_IF()) {
// interrupt-window exiting
BX_DEBUG(("VMEXIT: interrupt window exiting"));
VMexit(VMX_VMEXIT_INTERRUPT_WINDOW, 0);
@ -351,12 +348,12 @@ bx_bool BX_CPU_C::handleAsyncEvent(void)
// BX_CPU_THIS_PTR get_TF() // implies debug_trap is set
BX_HRQ
#if BX_SUPPORT_VMX
|| BX_CPU_THIS_PTR vmx_interrupt_window
|| (BX_CPU_THIS_PTR in_vmx_guest && ! BX_CPU_THIS_PTR disable_NMI &&
|| is_pending(BX_EVENT_VMX_INTERRUPT_WINDOW_EXITING)
|| (BX_CPU_THIS_PTR in_vmx_guest && ! is_masked_event(BX_EVENT_NMI) &&
VMEXIT(VMX_VM_EXEC_CTRL2_NMI_WINDOW_VMEXIT))
#endif
#if BX_SUPPORT_VMX >= 2
|| BX_CPU_THIS_PTR pending_vmx_timer_expired
|| is_pending(BX_EVENT_VMX_PREEMPTION_TIMER_EXPIRED)
#endif
#if BX_SUPPORT_SVM
|| (BX_CPU_THIS_PTR in_svm_guest && SVM_V_IRQ && BX_CPU_THIS_PTR get_IF() &&
@ -364,7 +361,7 @@ bx_bool BX_CPU_C::handleAsyncEvent(void)
#endif
#if BX_X86_DEBUGGER
// a debug code breakpoint is set in current page
|| BX_CPU_THIS_PTR codebp
|| is_pending(BX_EVENT_CODE_BREAKPOINT_ASSIST)
#endif
))
BX_CPU_THIS_PTR async_event = 0;
@ -391,7 +388,7 @@ void BX_CPU_C::deliver_SIPI(unsigned vector)
BX_CPU_THIS_PTR activity_state = BX_ACTIVITY_STATE_ACTIVE;
RIP = 0;
load_seg_reg(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS], vector*0x100);
BX_CPU_THIS_PTR disable_INIT = 0; // enable INIT pin back
unmask_event(BX_EVENT_INIT); // enable INIT pin back
BX_INFO(("CPU %d started up at %04X:%08X by APIC",
BX_CPU_THIS_PTR bx_cpuid, vector*0x100, EIP));
} else {
@ -401,22 +398,19 @@ void BX_CPU_C::deliver_SIPI(unsigned vector)
void BX_CPU_C::deliver_INIT(void)
{
if (! BX_CPU_THIS_PTR disable_INIT) {
BX_CPU_THIS_PTR pending_INIT = 1;
BX_CPU_THIS_PTR async_event = 1;
if (! is_masked_event(BX_EVENT_INIT)) {
signal_event(BX_EVENT_INIT);
}
}
// Latch a pending NMI so it is taken at the next instruction boundary.
// NOTE(review): stripped-diff residue -- the two flag assignments are the
// removed (old) lines; signal_event(BX_EVENT_NMI) is the replacement
// added by this commit. Only one form exists in the real source.
void BX_CPU_C::deliver_NMI(void)
{
BX_CPU_THIS_PTR pending_NMI = 1; // old interface: dedicated flag
BX_CPU_THIS_PTR async_event = 1; // old interface: generic async wakeup
signal_event(BX_EVENT_NMI); // new event interface (sets pending_event + async_event)
}
// Latch a pending SMI so it is taken at the next instruction boundary.
// NOTE(review): stripped-diff residue -- the two flag assignments are the
// removed (old) lines; signal_event(BX_EVENT_SMI) is the replacement
// added by this commit. Only one form exists in the real source.
void BX_CPU_C::deliver_SMI(void)
{
BX_CPU_THIS_PTR pending_SMI = 1; // old interface: dedicated flag
BX_CPU_THIS_PTR async_event = 1; // old interface: generic async wakeup
signal_event(BX_EVENT_SMI); // new event interface (sets pending_event + async_event)
}
void BX_CPU_C::set_INTR(bx_bool value)

View File

@ -38,7 +38,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::setEFlags(Bit32u val)
if (val & EFlagsRFMask) invalidate_prefetch_q();
if (val & EFlagsTFMask) {
BX_CPU_THIS_PTR async_event = 1; // TF == 1 || RF == 1
BX_CPU_THIS_PTR async_event = 1; // TF == 1
}
if (val & EFlagsIFMask) {

View File

@ -624,22 +624,16 @@ void BX_CPU_C::register_state(void)
register_svm_state(cpu);
#endif
BXRS_HEX_PARAM_SIMPLE32(cpu, pending_event);
BXRS_HEX_PARAM_SIMPLE32(cpu, event_mask);
BXRS_HEX_PARAM_SIMPLE32(cpu, async_event);
BXRS_PARAM_BOOL(cpu, INTR, INTR);
#if BX_X86_DEBUGGER
BXRS_PARAM_BOOL(cpu, in_repeat, in_repeat);
// for debug only (no need for save/restore), calculated in prefetch()
BXRS_PARAM_BOOL(cpu, codebp, codebp);
#endif
BXRS_PARAM_BOOL(cpu, in_smm, in_smm);
BXRS_PARAM_BOOL(cpu, disable_SMI, disable_SMI);
BXRS_PARAM_BOOL(cpu, pending_SMI, pending_SMI);
BXRS_PARAM_BOOL(cpu, disable_NMI, disable_NMI);
BXRS_PARAM_BOOL(cpu, pending_NMI, pending_NMI);
BXRS_PARAM_BOOL(cpu, disable_INIT, disable_INIT);
BXRS_PARAM_BOOL(cpu, pending_INIT, pending_INIT);
#if BX_DEBUGGER
bx_list_c *tlb = new bx_list_c(cpu, "TLB");
@ -942,15 +936,11 @@ void BX_CPU_C::reset(unsigned source)
#if BX_X86_DEBUGGER
BX_CPU_THIS_PTR in_repeat = 0;
BX_CPU_THIS_PTR codebp = 0;
#endif
BX_CPU_THIS_PTR in_smm = 0;
BX_CPU_THIS_PTR disable_SMI = 0;
BX_CPU_THIS_PTR pending_SMI = 0;
BX_CPU_THIS_PTR disable_NMI = 0;
BX_CPU_THIS_PTR pending_NMI = 0;
BX_CPU_THIS_PTR disable_INIT = 0;
BX_CPU_THIS_PTR pending_INIT = 0;
BX_CPU_THIS_PTR pending_event = 0;
BX_CPU_THIS_PTR event_mask = 0;
if (source == BX_RESET_HARDWARE) {
BX_CPU_THIS_PTR smbase = 0x30000; // do not change SMBASE on INIT
@ -1121,10 +1111,6 @@ void BX_CPU_C::reset(unsigned source)
#if BX_SUPPORT_VMX
BX_CPU_THIS_PTR in_vmx = BX_CPU_THIS_PTR in_vmx_guest = 0;
BX_CPU_THIS_PTR in_smm_vmx = BX_CPU_THIS_PTR in_smm_vmx_guest = 0;
BX_CPU_THIS_PTR vmx_interrupt_window = 0;
#if BX_SUPPORT_VMX >= 2
BX_CPU_THIS_PTR pending_vmx_timer_expired = 0;
#endif
BX_CPU_THIS_PTR vmcsptr = BX_CPU_THIS_PTR vmxonptr = BX_INVALID_VMCSPTR;
BX_CPU_THIS_PTR vmcshostptr = 0;
/* enable VMX, should be done in BIOS instead */

View File

@ -777,10 +777,11 @@ BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::MWAIT(bxInstruction_c *i)
if (ECX & 1) {
#if BX_SUPPORT_VMX
// When "interrupt window exiting" VMX control is set MWAIT instruction
// won't cause the processor to enter BX_ACTIVITY_STATE_MWAIT_IF sleep
// state with EFLAGS.IF = 0
if (BX_CPU_THIS_PTR vmx_interrupt_window && ! BX_CPU_THIS_PTR get_IF()) {
BX_NEXT_TRACE(i);
// won't cause the processor to enter sleep state with EFLAGS.IF = 0
if (BX_CPU_THIS_PTR in_vmx_guest) {
if (VMEXIT(VMX_VM_EXEC_CTRL2_INTERRUPT_WINDOW_VMEXIT) && ! BX_CPU_THIS_PTR get_IF()) {
BX_NEXT_TRACE(i);
}
}
#endif
BX_CPU_THIS_PTR activity_state = BX_ACTIVITY_STATE_MWAIT_IF;

View File

@ -51,7 +51,7 @@ BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::RSM(bxInstruction_c *i)
BX_INFO(("RSM: Resuming from System Management Mode"));
BX_CPU_THIS_PTR disable_NMI = 0;
unmask_event(BX_EVENT_NMI | BX_EVENT_SMI);
Bit32u saved_state[SMM_SAVE_STATE_MAP_SIZE], n;
// reset reserved bits
@ -114,7 +114,8 @@ void BX_CPU_C::enter_system_management_mode(void)
#endif
BX_CPU_THIS_PTR in_smm = 1;
BX_CPU_THIS_PTR disable_NMI = 1;
mask_event(BX_EVENT_NMI | BX_EVENT_SMI);
Bit32u saved_state[SMM_SAVE_STATE_MAP_SIZE], n;
// reset reserved bits

View File

@ -1635,7 +1635,7 @@ Bit32u BX_CPU_C::VMenterLoadCheckGuestState(Bit64u *qualification)
}
if (guest.interruptibility_state & BX_VMX_INTERRUPTS_BLOCKED_NMI_BLOCKED) {
BX_CPU_THIS_PTR disable_NMI = 1;
mask_event(BX_EVENT_NMI);
}
else {
if (vm->vmexec_ctrls2 & VMX_VM_EXEC_CTRL2_NMI_WINDOW_VMEXIT)
@ -1643,8 +1643,7 @@ Bit32u BX_CPU_C::VMenterLoadCheckGuestState(Bit64u *qualification)
}
if (vm->vmexec_ctrls2 & VMX_VM_EXEC_CTRL2_INTERRUPT_WINDOW_VMEXIT) {
BX_CPU_THIS_PTR async_event = 1;
BX_CPU_THIS_PTR vmx_interrupt_window = 1; // set up interrupt window exiting
signal_event(BX_EVENT_VMX_INTERRUPT_WINDOW_EXITING);
}
handleCpuContextChange();
@ -1876,9 +1875,9 @@ void BX_CPU_C::VMexitSaveGuestState(void)
else
interruptibility_state |= BX_VMX_INTERRUPTS_BLOCKED_BY_STI;
}
if (BX_CPU_THIS_PTR disable_SMI)
if (is_masked_event(BX_EVENT_SMI))
interruptibility_state |= BX_VMX_INTERRUPTS_BLOCKED_SMI_BLOCKED;
if (BX_CPU_THIS_PTR disable_NMI)
if (is_masked_event(BX_EVENT_NMI))
interruptibility_state |= BX_VMX_INTERRUPTS_BLOCKED_NMI_BLOCKED;
VMwrite32(VMCS_32BIT_GUEST_INTERRUPTIBILITY_STATE, interruptibility_state);
@ -1896,7 +1895,7 @@ void BX_CPU_C::VMexitSaveGuestState(void)
// Deactivate VMX preemption timer
BX_CPU_THIS_PTR lapic.deactivate_vmx_preemption_timer();
BX_CPU_THIS_PTR pending_vmx_timer_expired = 0;
clear_event(BX_EVENT_VMX_PREEMPTION_TIMER_EXPIRED);
// Store back to VMCS
if (vm->vmexit_ctrls & VMX_VMEXIT_CTRL1_STORE_VMX_PREEMPTION_TIMER)
VMwrite32(VMCS_32BIT_GUEST_PREEMPTION_TIMER_VALUE, BX_CPU_THIS_PTR lapic.read_vmx_preemption_timer());
@ -2142,6 +2141,11 @@ void BX_CPU_C::VMexit(Bit32u reason, Bit64u qualification)
BX_CPU_THIS_PTR in_vmx_guest = 0;
// entering VMX root mode: clear possibly pending guest VMX events
clear_event(BX_EVENT_VMX_INTERRUPT_WINDOW_EXITING |
BX_EVENT_VMX_PREEMPTION_TIMER_EXPIRED |
BX_EVENT_VMX_NMI_WINDOW_EXITING);
//
// STEP 2: Load Host State
//
@ -2161,8 +2165,8 @@ void BX_CPU_C::VMexit(Bit32u reason, Bit64u qualification)
// STEP 4: Go back to VMX host
//
BX_CPU_THIS_PTR disable_INIT = 1; // INIT is disabled in VMX root mode
BX_CPU_THIS_PTR vmx_interrupt_window = 0;
mask_event(BX_EVENT_INIT); // INIT is disabled in VMX root mode
BX_CPU_THIS_PTR errorno = 0;
BX_CPU_THIS_PTR EXT = 0;
@ -2217,7 +2221,7 @@ BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::VMXON(bxInstruction_c *i)
BX_CPU_THIS_PTR vmcshostptr = 0;
BX_CPU_THIS_PTR vmxonptr = pAddr;
BX_CPU_THIS_PTR in_vmx = 1;
BX_CPU_THIS_PTR disable_INIT = 1; // INIT is disabled in VMX root mode
mask_event(BX_EVENT_INIT); // INIT is disabled in VMX root mode
// block and disable A20M;
#if BX_SUPPORT_MONITOR_MWAIT
@ -2268,7 +2272,7 @@ BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::VMXOFF(bxInstruction_c *i)
{
BX_CPU_THIS_PTR vmxonptr = BX_INVALID_VMCSPTR;
BX_CPU_THIS_PTR in_vmx = 0; // leave VMX operation mode
BX_CPU_THIS_PTR disable_INIT = 0;
unmask_event(BX_EVENT_INIT);
// unblock and enable A20M;
#if BX_SUPPORT_MONITOR_MWAIT
BX_CPU_THIS_PTR monitor.reset_monitor();
@ -2504,7 +2508,8 @@ BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::VMLAUNCH(bxInstruction_c *i)
*/
BX_CPU_THIS_PTR in_vmx_guest = 1;
BX_CPU_THIS_PTR disable_INIT = 0;
unmask_event(BX_EVENT_INIT);
if (VMEXIT(VMX_VM_EXEC_CTRL2_TSC_OFFSET))
BX_CPU_THIS_PTR tsc_offset = VMread64(VMCS_64BIT_CONTROL_TSC_OFFSET);
@ -2515,13 +2520,11 @@ BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::VMLAUNCH(bxInstruction_c *i)
if (PIN_VMEXIT(VMX_VM_EXEC_CTRL1_VMX_PREEMPTION_TIMER_VMEXIT)) {
Bit32u timer_value = VMread32(VMCS_32BIT_GUEST_PREEMPTION_TIMER_VALUE);
if (timer_value == 0) {
BX_CPU_THIS_PTR pending_vmx_timer_expired = 1;
BX_CPU_THIS_PTR async_event = 1;
signal_event(BX_EVENT_VMX_PREEMPTION_TIMER_EXPIRED);
}
else {
// activate VMX preemption timer
BX_DEBUG(("VMX preemption timer active"));
BX_CPU_THIS_PTR pending_vmx_timer_expired = 0;
BX_CPU_THIS_PTR lapic.set_vmx_preemption_timer(timer_value);
}
}
@ -3127,10 +3130,6 @@ void BX_CPU_C::register_vmx_state(bx_param_c *parent)
BXRS_PARAM_BOOL(vmx, in_vmx_guest, BX_CPU_THIS_PTR in_vmx_guest);
BXRS_PARAM_BOOL(vmx, in_smm_vmx, BX_CPU_THIS_PTR in_smm_vmx);
BXRS_PARAM_BOOL(vmx, in_smm_vmx_guest, BX_CPU_THIS_PTR in_smm_vmx_guest);
BXRS_PARAM_BOOL(vmx, vmx_interrupt_window, BX_CPU_THIS_PTR vmx_interrupt_window);
#if BX_SUPPORT_VMX >= 2
BXRS_PARAM_BOOL(vmx, pending_vmx_timer_expired, BX_CPU_THIS_PTR pending_vmx_timer_expired);
#endif
bx_list_c *vmcache = new bx_list_c(vmx, "VMCS_CACHE");