move inhibit interrupts functionality to icount interface

Stanislav Shwartsman 2011-12-21 06:17:45 +00:00
parent e38772006f
commit e7ed8aca5c
9 changed files with 55 additions and 70 deletions
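In short: the old mechanism shifted `inhibit_mask` into `*_SHADOW` bits at every instruction boundary; the new one records the instruction count up to which the inhibition is in effect and compares against the current count. Condensed from the cpu.h and cpu.cc hunks below (a sketch of the new interface, not the verbatim source):

#define BX_INHIBIT_INTERRUPTS 0x01
#define BX_INHIBIT_DEBUG      0x02
#define BX_INHIBIT_INTERRUPTS_BY_MOVSS \
        (BX_INHIBIT_INTERRUPTS | BX_INHIBIT_DEBUG)

// called by STI, MOV SS and POP SS instead of OR-ing shadow bits
void BX_CPU_C::inhibit_interrupts(unsigned mask)
{
  BX_CPU_THIS_PTR inhibit_mask = mask;
  BX_CPU_THIS_PTR inhibit_icount = get_icount() + 1; // next instruction only
}

// queried by handleAsyncEvent(), prefetch() and the VMX entry/exit code
bx_bool BX_CPU_C::interrupts_inhibited(unsigned mask)
{
  return (get_icount() <= BX_CPU_THIS_PTR inhibit_icount) &&
         (BX_CPU_THIS_PTR inhibit_mask & mask) == mask;
}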

View File

@@ -59,9 +59,7 @@ void BX_CPU_C::cpu_loop(void)
if (setjmp(BX_CPU_THIS_PTR jmp_buf_env)) {
// can get here only from exception function or VMEXIT
#if BX_SUPPORT_HANDLERS_CHAINING_SPEEDUPS
BX_CPU_THIS_PTR icount++;
#endif
BX_SYNC_TIME_IF_SINGLE_PROCESSOR(0);
#if BX_DEBUGGER || BX_GDBSTUB
if (dbg_instruction_epilog()) return;
@@ -132,6 +130,8 @@ void BX_CPU_C::cpu_loop(void)
BX_CPU_CALL_METHOD(i->execute, (i)); // might iterate repeat instruction
BX_CPU_THIS_PTR prev_rip = RIP; // commit new RIP
BX_INSTR_AFTER_EXECUTION(BX_CPU_ID, i);
BX_CPU_THIS_PTR icount++;
BX_SYNC_TIME_IF_SINGLE_PROCESSOR(0);
// note instructions generating exceptions never reach this point
@@ -268,9 +268,8 @@ void BX_CPP_AttrRegparmN(2) BX_CPU_C::repeat(bxInstruction_c *i, BxRepIterationP
#endif
break; // exit always if debugger enabled
#if BX_SUPPORT_HANDLERS_CHAINING_SPEEDUPS || BX_SUPPORT_SMP
BX_CPU_THIS_PTR icount++;
#endif
BX_SYNC_TIME_IF_SINGLE_PROCESSOR(BX_REPEAT_TIME_UPDATE_INTERVAL);
}
}
@@ -290,9 +289,8 @@ void BX_CPP_AttrRegparmN(2) BX_CPU_C::repeat(bxInstruction_c *i, BxRepIterationP
#endif
break; // exit always if debugger enabled
#if BX_SUPPORT_HANDLERS_CHAINING_SPEEDUPS || BX_SUPPORT_SMP
BX_CPU_THIS_PTR icount++;
#endif
BX_SYNC_TIME_IF_SINGLE_PROCESSOR(BX_REPEAT_TIME_UPDATE_INTERVAL);
}
}
@@ -311,9 +309,8 @@ void BX_CPP_AttrRegparmN(2) BX_CPU_C::repeat(bxInstruction_c *i, BxRepIterationP
#endif
break; // exit always if debugger enabled
#if BX_SUPPORT_HANDLERS_CHAINING_SPEEDUPS || BX_SUPPORT_SMP
BX_CPU_THIS_PTR icount++;
#endif
BX_SYNC_TIME_IF_SINGLE_PROCESSOR(BX_REPEAT_TIME_UPDATE_INTERVAL);
}
}
@@ -358,9 +355,8 @@ void BX_CPP_AttrRegparmN(2) BX_CPU_C::repeat_ZF(bxInstruction_c *i, BxRepIterati
#endif
break; // exit always if debugger enabled
#if BX_SUPPORT_HANDLERS_CHAINING_SPEEDUPS || BX_SUPPORT_SMP
BX_CPU_THIS_PTR icount++;
#endif
BX_SYNC_TIME_IF_SINGLE_PROCESSOR(BX_REPEAT_TIME_UPDATE_INTERVAL);
}
}
@@ -380,9 +376,8 @@ void BX_CPP_AttrRegparmN(2) BX_CPU_C::repeat_ZF(bxInstruction_c *i, BxRepIterati
#endif
break; // exit always if debugger enabled
#if BX_SUPPORT_HANDLERS_CHAINING_SPEEDUPS || BX_SUPPORT_SMP
BX_CPU_THIS_PTR icount++;
#endif
BX_SYNC_TIME_IF_SINGLE_PROCESSOR(BX_REPEAT_TIME_UPDATE_INTERVAL);
}
}
@@ -401,9 +396,8 @@ void BX_CPP_AttrRegparmN(2) BX_CPU_C::repeat_ZF(bxInstruction_c *i, BxRepIterati
#endif
break; // exit always if debugger enabled
#if BX_SUPPORT_HANDLERS_CHAINING_SPEEDUPS || BX_SUPPORT_SMP
BX_CPU_THIS_PTR icount++;
#endif
BX_SYNC_TIME_IF_SINGLE_PROCESSOR(BX_REPEAT_TIME_UPDATE_INTERVAL);
}
}
@@ -424,9 +418,8 @@ void BX_CPP_AttrRegparmN(2) BX_CPU_C::repeat_ZF(bxInstruction_c *i, BxRepIterati
#endif
break; // exit always if debugger enabled
#if BX_SUPPORT_HANDLERS_CHAINING_SPEEDUPS || BX_SUPPORT_SMP
BX_CPU_THIS_PTR icount++;
#endif
BX_SYNC_TIME_IF_SINGLE_PROCESSOR(BX_REPEAT_TIME_UPDATE_INTERVAL);
}
}
@@ -446,9 +439,8 @@ void BX_CPP_AttrRegparmN(2) BX_CPU_C::repeat_ZF(bxInstruction_c *i, BxRepIterati
#endif
break; // exit always if debugger enabled
#if BX_SUPPORT_HANDLERS_CHAINING_SPEEDUPS || BX_SUPPORT_SMP
BX_CPU_THIS_PTR icount++;
#endif
BX_SYNC_TIME_IF_SINGLE_PROCESSOR(BX_REPEAT_TIME_UPDATE_INTERVAL);
}
}
@@ -467,9 +459,8 @@ void BX_CPP_AttrRegparmN(2) BX_CPU_C::repeat_ZF(bxInstruction_c *i, BxRepIterati
#endif
break; // exit always if debugger enabled
#if BX_SUPPORT_HANDLERS_CHAINING_SPEEDUPS || BX_SUPPORT_SMP
BX_CPU_THIS_PTR icount++;
#endif
BX_SYNC_TIME_IF_SINGLE_PROCESSOR(BX_REPEAT_TIME_UPDATE_INTERVAL);
}
}
@@ -490,7 +481,7 @@ unsigned BX_CPU_C::handleAsyncEvent(void)
//
// This area is where we process special conditions and events.
//
if (BX_CPU_THIS_PTR activity_state) {
if (BX_CPU_THIS_PTR activity_state != BX_ACTIVITY_STATE_ACTIVE) {
// For one processor, pass the time as quickly as possible until
// an interrupt wakes up the CPU.
while (1)
@@ -542,15 +533,13 @@ unsigned BX_CPU_C::handleAsyncEvent(void)
BX_TICKN(10); // when in HLT run time faster for single CPU
}
} else if (bx_pc_system.kill_bochs_request) {
}
if (bx_pc_system.kill_bochs_request) {
// setting kill_bochs_request causes the cpu loop to return ASAP.
return 1; // Return to caller of cpu_loop.
}
// VMLAUNCH/VMRESUME cannot be executed with interrupts inhibited.
// Save inhibit interrupts state into shadow bits after clearing
BX_CPU_THIS_PTR inhibit_mask = (BX_CPU_THIS_PTR inhibit_mask << 2) & 0xF;
// Priority 1: Hardware Reset and Machine Checks
// RESET
// Machine Check
@@ -599,10 +588,8 @@ unsigned BX_CPU_C::handleAsyncEvent(void)
// Priority 4: Traps on Previous Instruction
// Breakpoints
// Debug Trap Exceptions (TF flag set or data/IO breakpoint)
if (! (BX_CPU_THIS_PTR inhibit_mask & BX_INHIBIT_DEBUG_SHADOW)) {
// A trap may be inhibited on this boundary due to an instruction
// which loaded SS. If so we clear the inhibit_mask below
// and don't execute this code until the next boundary.
if (! interrupts_inhibited(BX_INHIBIT_DEBUG)) {
// A trap may be inhibited on this boundary due to an instruction which loaded SS.
#if BX_X86_DEBUGGER
code_breakpoint_match(get_laddr(BX_SEG_REG_CS, BX_CPU_THIS_PTR prev_rip));
#endif
@@ -623,12 +610,9 @@ unsigned BX_CPU_C::handleAsyncEvent(void)
// Priority 5: External Interrupts
// NMI Interrupts
// Maskable Hardware Interrupts
if (BX_CPU_THIS_PTR inhibit_mask & BX_INHIBIT_INTERRUPTS_SHADOW) {
if (interrupts_inhibited(BX_INHIBIT_INTERRUPTS)) {
// Processing external interrupts is inhibited on this
// boundary because of certain instructions like STI.
// inhibit_mask is cleared below, in which case we will have
// an opportunity to check interrupts on the next instruction
// boundary.
}
#if BX_SUPPORT_VMX
else if (! BX_CPU_THIS_PTR disable_NMI && BX_CPU_THIS_PTR in_vmx_guest &&
@@ -733,7 +717,9 @@ unsigned BX_CPU_C::handleAsyncEvent(void)
// BX_CPU_THIS_PTR get_TF() // implies debug_trap is set
BX_HRQ
#if BX_SUPPORT_VMX
|| BX_CPU_THIS_PTR vmx_interrupt_window || BX_CPU_THIS_PTR inhibit_mask
|| BX_CPU_THIS_PTR vmx_interrupt_window
|| (! BX_CPU_THIS_PTR disable_NMI && BX_CPU_THIS_PTR in_vmx_guest &&
VMEXIT(VMX_VM_EXEC_CTRL2_NMI_WINDOW_VMEXIT))
#endif
#if BX_SUPPORT_VMX >= 2
|| BX_CPU_THIS_PTR pending_vmx_timer_expired
@@ -748,6 +734,18 @@ unsigned BX_CPU_C::handleAsyncEvent(void)
return 0; // Continue executing cpu_loop.
}
// Certain instructions inhibit interrupts, some debug exceptions and single-step traps.
void BX_CPU_C::inhibit_interrupts(unsigned mask)
{
BX_DEBUG(("inhibit interrupts mask = %d", mask));
BX_CPU_THIS_PTR inhibit_mask = mask;
BX_CPU_THIS_PTR inhibit_icount = get_icount() + 1; // inhibit for next instruction
}
bx_bool BX_CPU_C::interrupts_inhibited(unsigned mask)
{
return (get_icount() <= BX_CPU_THIS_PTR inhibit_icount) && (BX_CPU_THIS_PTR inhibit_mask & mask) == mask;
}
// boundaries of consideration:
//
@@ -803,7 +801,7 @@ void BX_CPU_C::prefetch(void)
if (hwbreakpoint_check(laddr, BX_HWDebugInstruction, BX_HWDebugInstruction)) {
BX_CPU_THIS_PTR async_event = 1;
BX_CPU_THIS_PTR codebp = 1;
if (! (BX_CPU_THIS_PTR inhibit_mask & BX_INHIBIT_DEBUG_SHADOW)) {
if (! interrupts_inhibited(BX_INHIBIT_DEBUG)) {
// The next instruction could already hit a code breakpoint but
// async_event won't take effect immediatelly.
// Check if the next executing instruction hits code breakpoint
@@ -903,8 +901,6 @@ bx_bool BX_CPU_C::dbg_instruction_epilog(void)
bx_address debug_eip = RIP;
Bit16u cs = BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value;
BX_CPU_THIS_PTR icount++;
BX_CPU_THIS_PTR guard_found.cs = cs;
BX_CPU_THIS_PTR guard_found.eip = debug_eip;
BX_CPU_THIS_PTR guard_found.laddr = BX_CPU_THIS_PTR get_laddr(BX_SEG_REG_CS, debug_eip);
@@ -942,7 +938,7 @@ bx_bool BX_CPU_C::dbg_instruction_epilog(void)
// see if debugger requesting icount guard
if (bx_guard.guard_for & BX_DBG_GUARD_ICOUNT) {
if (BX_CPU_THIS_PTR icount >= BX_CPU_THIS_PTR guard_found.icount_max) {
if (get_icount() >= BX_CPU_THIS_PTR guard_found.icount_max) {
return(1);
}
}
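With this interface, `icount` is incremented exactly once per retired instruction in cpu_loop(); the conditionally compiled increments in the setjmp exception path, the repeat loops and dbg_instruction_epilog() are removed above. The one-instruction shadow then falls out of the arithmetic: inhibit_interrupts() runs while the inhibiting instruction is still being counted, so inhibit_icount equals the count seen at the next boundary and is one behind the count at the boundary after that, and the `<=` test holds exactly once. A self-contained toy model of that window (illustrative only; the names mirror the diff but this is not Bochs code):

#include <cstdint>
#include <cassert>

struct ToyCpu {
  uint64_t icount = 0;         // retired-instruction counter
  unsigned inhibit_mask = 0;
  uint64_t inhibit_icount = 0;

  void inhibit_interrupts(unsigned mask) {
    inhibit_mask = mask;
    inhibit_icount = icount + 1;             // window covers the next instruction
  }
  bool interrupts_inhibited(unsigned mask) const {
    return icount <= inhibit_icount && (inhibit_mask & mask) == mask;
  }
};

int main() {
  const unsigned INTR = 0x01;                // stands in for BX_INHIBIT_INTERRUPTS
  ToyCpu cpu;
  cpu.inhibit_interrupts(INTR);              // an STI-like instruction executes ...
  cpu.icount++;                              // ... and is counted by the cpu loop
  assert(cpu.interrupts_inhibited(INTR));    // boundary after STI: delivery blocked
  cpu.icount++;                              // the shadowed instruction retires
  assert(!cpu.interrupts_inhibited(INTR));   // next boundary: delivery allowed again
  return 0;
}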

View File

@@ -904,24 +904,19 @@ public: // for now...
bx_address prev_rsp;
bx_bool speculative_rsp;
#if BX_DEBUGGER || BX_SUPPORT_HANDLERS_CHAINING_SPEEDUPS || BX_SUPPORT_SMP
Bit64u icount;
Bit64u icount_last_sync;
#endif
#define BX_INHIBIT_INTERRUPTS 0x01
#define BX_INHIBIT_DEBUG 0x02
#define BX_INHIBIT_INTERRUPTS_SHADOW 0x04
#define BX_INHIBIT_DEBUG_SHADOW 0x08
#define BX_INHIBIT_INTERRUPTS_BY_MOVSS \
(BX_INHIBIT_INTERRUPTS | BX_INHIBIT_DEBUG)
#define BX_INHIBIT_INTERRUPTS_BY_MOVSS_SHADOW \
(BX_INHIBIT_INTERRUPTS_SHADOW | BX_INHIBIT_DEBUG_SHADOW)
// What events to inhibit at any given time. Certain instructions
// inhibit interrupts, some debug exceptions and single-step traps.
unsigned inhibit_mask;
Bit64u inhibit_icount;
/* user segment register set */
bx_segment_reg_t sregs[6];
@@ -3853,6 +3848,8 @@ public: // for now...
BX_SMF void TLB_flush(void);
BX_SMF void TLB_invlpg(bx_address laddr);
BX_SMF void set_INTR(bx_bool value);
BX_SMF void inhibit_interrupts(unsigned mask);
BX_SMF bx_bool interrupts_inhibited(unsigned mask);
BX_SMF const char *strseg(bx_segment_reg_t *seg);
BX_SMF void interrupt(Bit8u vector, unsigned type, bx_bool push_error,
Bit16u error_code);
@@ -4028,10 +4025,8 @@ public: // for now...
BX_SMF BX_CPP_INLINE int bx_cpuid_support_tsc_deadline(void);
BX_SMF BX_CPP_INLINE unsigned which_cpu(void) { return BX_CPU_THIS_PTR bx_cpuid; }
#if BX_DEBUGGER || BX_SUPPORT_HANDLERS_CHAINING_SPEEDUPS || BX_SUPPORT_SMP
BX_SMF BX_CPP_INLINE Bit64u get_icount(void) { return BX_CPU_THIS_PTR icount; }
BX_SMF BX_CPP_INLINE Bit64u get_icount_last_sync(void) { return BX_CPU_THIS_PTR icount_last_sync; }
#endif
BX_SMF BX_CPP_INLINE const bx_gen_reg_t *get_gen_regfile() { return BX_CPU_THIS_PTR gen_reg; }
BX_SMF BX_CPP_INLINE bx_address get_instruction_pointer(void);
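With the `*_SHADOW` defines gone, the difference between an STI shadow and a MOV SS/POP SS shadow is carried entirely by which bits sit in `inhibit_mask`, since interrupts_inhibited() requires every bit of the queried mask to be present. A minimal illustration of that mask test alone (hypothetical snippet; the icount window is left out):

enum : unsigned {
  INHIBIT_INTERRUPTS = 0x01,                 // BX_INHIBIT_INTERRUPTS
  INHIBIT_DEBUG      = 0x02,                 // BX_INHIBIT_DEBUG
  INHIBIT_BY_MOVSS   = INHIBIT_INTERRUPTS | INHIBIT_DEBUG
};

constexpr bool inhibited(unsigned current, unsigned query)
{
  return (current & query) == query;         // all queried bits must be set
}

// STI blocks external interrupts but not debug traps ...
static_assert( inhibited(INHIBIT_INTERRUPTS, INHIBIT_INTERRUPTS), "STI shadow");
static_assert(!inhibited(INHIBIT_INTERRUPTS, INHIBIT_DEBUG),      "STI shadow");
// ... while MOV SS / POP SS blocks both; VMexitSaveGuestState() below
// uses exactly this to tell the two apart.
static_assert( inhibited(INHIBIT_BY_MOVSS, INHIBIT_INTERRUPTS),   "MOV SS shadow");
static_assert( inhibited(INHIBIT_BY_MOVSS, INHIBIT_DEBUG),        "MOV SS shadow");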

View File

@@ -128,8 +128,7 @@ BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::MOV_SwEw(bxInstruction_c *i)
// trap exceptions until the execution boundary following the
// next instruction is reached.
// Same code as POP_SS()
BX_CPU_THIS_PTR inhibit_mask |= BX_INHIBIT_INTERRUPTS_BY_MOVSS;
BX_CPU_THIS_PTR async_event = 1;
inhibit_interrupts(BX_INHIBIT_INTERRUPTS_BY_MOVSS);
}
BX_NEXT_INSTR(i);

View File

@@ -200,7 +200,7 @@ static const BxOpcodeInfo_t BxOpcodeInfo32[512*2] = {
/* 14 /w */ { BxImmediate_Ib, BX_IA_ADC_ALIb },
/* 15 /w */ { BxImmediate_Iw, BX_IA_ADC_AXIw },
/* 16 /w */ { 0, BX_IA_PUSH16_SS },
/* 17 /w */ { BxTraceEnd, BX_IA_POP16_SS }, // async_event = 1
/* 17 /w */ { 0, BX_IA_POP16_SS },
/* 18 /w */ { BxLockable | BxArithDstRM, BX_IA_SBB_EbGb },
/* 19 /w */ { BxLockable | BxArithDstRM, BX_IA_SBB_EwGw },
/* 1A /w */ { 0, BX_IA_SBB_GbEb },
@@ -745,7 +745,7 @@ static const BxOpcodeInfo_t BxOpcodeInfo32[512*2] = {
/* 14 /d */ { BxImmediate_Ib, BX_IA_ADC_ALIb },
/* 15 /d */ { BxImmediate_Id, BX_IA_ADC_EAXId },
/* 16 /d */ { 0, BX_IA_PUSH32_SS },
/* 17 /d */ { BxTraceEnd, BX_IA_POP32_SS }, // async_event = 1
/* 17 /d */ { 0, BX_IA_POP32_SS },
/* 18 /d */ { BxLockable | BxArithDstRM, BX_IA_SBB_EbGb },
/* 19 /d */ { BxLockable | BxArithDstRM, BX_IA_SBB_EdGd },
/* 1A /d */ { 0, BX_IA_SBB_GbEb },

View File

@@ -141,7 +141,7 @@ BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::STI(bxInstruction_c *i)
if (! BX_CPU_THIS_PTR get_IF()) {
BX_CPU_THIS_PTR assert_IF();
BX_CPU_THIS_PTR inhibit_mask |= BX_INHIBIT_INTERRUPTS;
inhibit_interrupts(BX_INHIBIT_INTERRUPTS);
BX_CPU_THIS_PTR async_event = 1;
}

View File

@@ -366,11 +366,10 @@ void BX_CPU_C::register_state(void)
BXRS_DEC_PARAM_SIMPLE(cpu, cpu_mode);
BXRS_HEX_PARAM_SIMPLE(cpu, activity_state);
BXRS_HEX_PARAM_SIMPLE(cpu, inhibit_mask);
BXRS_HEX_PARAM_SIMPLE(cpu, inhibit_icount);
BXRS_HEX_PARAM_SIMPLE(cpu, debug_trap);
#if BX_DEBUGGER || BX_SUPPORT_HANDLERS_CHAINING_SPEEDUPS
BXRS_DEC_PARAM_SIMPLE(cpu, icount);
BXRS_DEC_PARAM_SIMPLE(cpu, icount_last_sync);
#endif
#if BX_SUPPORT_X86_64
BXRS_HEX_PARAM_SIMPLE(cpu, RAX);
BXRS_HEX_PARAM_SIMPLE(cpu, RBX);
@@ -783,13 +782,13 @@ void BX_CPU_C::reset(unsigned source)
// status and control flags register set
setEFlags(0x2); // Bit1 is always set
#if BX_DEBUGGER || BX_SUPPORT_HANDLERS_CHAINING_SPEEDUPS || BX_SUPPORT_SMP
if (source == BX_RESET_HARDWARE)
BX_CPU_THIS_PTR icount = 0;
BX_CPU_THIS_PTR icount_last_sync = BX_CPU_THIS_PTR icount;
#endif
BX_CPU_THIS_PTR inhibit_mask = 0;
BX_CPU_THIS_PTR inhibit_icount = 0;
BX_CPU_THIS_PTR activity_state = BX_ACTIVITY_STATE_ACTIVE;
BX_CPU_THIS_PTR debug_trap = 0;

View File

@@ -134,8 +134,7 @@ BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::POP16_SS(bxInstruction_c *i)
// trap exceptions until the execution boundary following the
// next instruction is reached.
// Same code as MOV_SwEw()
BX_CPU_THIS_PTR inhibit_mask |= BX_INHIBIT_INTERRUPTS_BY_MOVSS;
BX_CPU_THIS_PTR async_event = 1;
inhibit_interrupts(BX_INHIBIT_INTERRUPTS_BY_MOVSS);
BX_NEXT_TRACE(i); // async event is set
}

View File

@@ -249,8 +249,7 @@ BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::POP32_SS(bxInstruction_c *i)
// trap exceptions until the execution boundary following the
// next instruction is reached.
// Same code as MOV_SwEw()
BX_CPU_THIS_PTR inhibit_mask |= BX_INHIBIT_INTERRUPTS_BY_MOVSS;
BX_CPU_THIS_PTR async_event = 1;
inhibit_interrupts(BX_INHIBIT_INTERRUPTS_BY_MOVSS);
BX_NEXT_TRACE(i); // async event is set
}

View File

@@ -1615,15 +1615,13 @@ Bit32u BX_CPU_C::VMenterLoadCheckGuestState(Bit64u *qualification)
BX_CPU_THIS_PTR async_event = 1;
if (guest.interruptibility_state & BX_VMX_INTERRUPTS_BLOCKED_BY_STI)
BX_CPU_THIS_PTR inhibit_mask = BX_INHIBIT_INTERRUPTS;
inhibit_interrupts(BX_INHIBIT_INTERRUPTS);
else if (guest.interruptibility_state & BX_VMX_INTERRUPTS_BLOCKED_BY_MOV_SS)
BX_CPU_THIS_PTR inhibit_mask = BX_INHIBIT_INTERRUPTS | BX_INHIBIT_DEBUG;
else BX_CPU_THIS_PTR inhibit_mask = 0;
inhibit_interrupts(BX_INHIBIT_INTERRUPTS_BY_MOVSS);
else
BX_CPU_THIS_PTR inhibit_mask = 0;
}
if (BX_CPU_THIS_PTR inhibit_mask)
BX_CPU_THIS_PTR async_event = 1;
if (guest.interruptibility_state & BX_VMX_INTERRUPTS_BLOCKED_NMI_BLOCKED) {
BX_CPU_THIS_PTR disable_NMI = 1;
}
@@ -1888,8 +1886,8 @@ void BX_CPU_C::VMexitSaveGuestState(void)
VMwrite_natural(VMCS_GUEST_PENDING_DBG_EXCEPTIONS, tmpDR6 & 0x0000500f);
Bit32u interruptibility_state = 0;
if (BX_CPU_THIS_PTR inhibit_mask & BX_INHIBIT_INTERRUPTS_SHADOW) {
if (BX_CPU_THIS_PTR inhibit_mask & BX_INHIBIT_DEBUG_SHADOW)
if (interrupts_inhibited(BX_INHIBIT_INTERRUPTS)) {
if (interrupts_inhibited(BX_INHIBIT_DEBUG))
interruptibility_state |= BX_VMX_INTERRUPTS_BLOCKED_BY_MOV_SS;
else
interruptibility_state |= BX_VMX_INTERRUPTS_BLOCKED_BY_STI;
@@ -2417,7 +2415,7 @@ BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::VMLAUNCH(bxInstruction_c *i)
BX_NEXT_TRACE(i);
}
if ((BX_CPU_THIS_PTR inhibit_mask & BX_INHIBIT_INTERRUPTS_BY_MOVSS_SHADOW) == BX_INHIBIT_INTERRUPTS_BY_MOVSS_SHADOW) {
if (interrupts_inhibited(BX_INHIBIT_INTERRUPTS_BY_MOVSS)) {
BX_ERROR(("VMFAIL: VMLAUNCH with interrupts blocked by MOV_SS !"));
VMfail(VMXERR_VMENTRY_MOV_SS_BLOCKING);
BX_NEXT_TRACE(i);