Move INTR, Local APIC INTR and SVM VINTR into new event interface (hardest part)

A minor speedup (of 1-2%) was observed with the new implementation.
Remove the obsolete dbg_take_irq and dbg_force_interrupt functions from the CPU code; the functions were not working properly anyway.
This commit is contained in:
Stanislav Shwartsman 2012-10-03 20:24:29 +00:00
parent 961a88ab43
commit 2ca0c6c677
9 changed files with 131 additions and 129 deletions

View File

@ -2364,12 +2364,6 @@ void bx_dbg_take_command(const char *what, unsigned n)
if (bx_guard.report.dma)
dbg_printf("done\n");
}
else if (! strcmp(what, "irq")) {
BX_CPU(0)->dbg_take_irq();
if (bx_guard.report.irq)
dbg_printf("done\n");
}
else {
dbg_printf("Error: Take '%s' not understood.\n", what);
}

View File

@ -251,8 +251,6 @@ void bx_local_apic_c::reset(unsigned type)
mode = BX_APIC_XAPIC_MODE;
INTR = 0;
if (xapic)
apic_version_id = 0x00050014; // P4 has 6 LVT entries
else
@ -781,7 +779,9 @@ void bx_local_apic_c::service_local_apic(void)
BX_INFO(("service_local_apic()"));
print_status();
}
if(INTR) return; // INTR already up; do nothing
if(cpu->is_pending(BX_EVENT_PENDING_LAPIC_INTR)) return; // INTR already up; do nothing
// find first interrupt in irr.
int first_irr = highest_priority_int(irr);
if (first_irr < 0) return; // no interrupts, leave INTR=0
@ -798,8 +798,7 @@ void bx_local_apic_c::service_local_apic(void)
// acknowledges, we will run highest_priority_int again and
// return it.
BX_DEBUG(("service_local_apic(): setting INTR=1 for vector 0x%02x", first_irr));
INTR = 1;
cpu->async_event = 1;
cpu->signal_event(BX_EVENT_PENDING_LAPIC_INTR);
}
bx_bool bx_local_apic_c::deliver(Bit8u vector, Bit8u delivery_mode, Bit8u trig_mode)
@ -877,7 +876,7 @@ void bx_local_apic_c::untrigger_irq(Bit8u vector, unsigned trigger_mode)
Bit8u bx_local_apic_c::acknowledge_int(void)
{
// CPU calls this when it is ready to service one interrupt
if(!INTR)
if(! cpu->is_pending(BX_EVENT_PENDING_LAPIC_INTR))
BX_PANIC(("APIC %d acknowledged an interrupt, but INTR=0", apic_id));
int vector = highest_priority_int(irr);
@ -891,12 +890,13 @@ Bit8u bx_local_apic_c::acknowledge_int(void)
BX_INFO(("Status after setting isr:"));
print_status();
}
INTR = 0;
cpu->clear_event(BX_EVENT_PENDING_LAPIC_INTR);
service_local_apic(); // will set INTR again if another is ready
return vector;
spurious:
INTR = 0;
cpu->clear_event(BX_EVENT_PENDING_LAPIC_INTR);
return spurious_vector;
}
@ -1393,7 +1393,6 @@ void bx_local_apic_c::register_state(bx_param_c *parent)
BXRS_DEC_PARAM_SIMPLE(lapic, timer_handle);
BXRS_PARAM_BOOL(lapic, timer_active, timer_active);
BXRS_HEX_PARAM_SIMPLE(lapic, ticksInitial);
BXRS_PARAM_BOOL(lapic, INTR, INTR);
#if BX_SUPPORT_VMX >= 2
BXRS_DEC_PARAM_SIMPLE(lapic, vmx_timer_handle);

View File

@ -472,12 +472,6 @@ extern const char* cpu_mode_string(unsigned cpu_mode);
#define IsValidPhyAddr(addr) ((addr & BX_PHY_ADDRESS_RESERVED_BITS) == 0)
#if BX_SUPPORT_APIC
#define BX_CPU_INTR (BX_CPU_THIS_PTR INTR || BX_CPU_THIS_PTR lapic.INTR)
#else
#define BX_CPU_INTR (BX_CPU_THIS_PTR INTR)
#endif
#define CACHE_LINE_SIZE 64
class BX_CPU_C;
@ -595,16 +589,31 @@ BOCHSAPI extern BX_CPU_C bx_cpu;
} \
}
// assert async_event when IF or TF is set
#define IMPLEMENT_EFLAG_SET_ACCESSOR_IF_TF(name,bitnum) \
BX_CPP_INLINE void BX_CPU_C::assert_##name() { \
// need special handling when IF is set
#define IMPLEMENT_EFLAG_SET_ACCESSOR_IF(bitnum) \
BX_CPP_INLINE void BX_CPU_C::assert_IF() { \
BX_CPU_THIS_PTR eflags |= (1<<bitnum); \
handleInterruptMaskChange(); \
} \
BX_CPP_INLINE void BX_CPU_C::clear_IF() { \
BX_CPU_THIS_PTR eflags &= ~(1<<bitnum); \
handleInterruptMaskChange(); \
} \
BX_CPP_INLINE void BX_CPU_C::set_IF(bx_bool val) { \
if (val) assert_IF(); \
else clear_IF(); \
}
// assert async_event when TF is set
#define IMPLEMENT_EFLAG_SET_ACCESSOR_TF(bitnum) \
BX_CPP_INLINE void BX_CPU_C::assert_TF() { \
BX_CPU_THIS_PTR async_event = 1; \
BX_CPU_THIS_PTR eflags |= (1<<bitnum); \
} \
BX_CPP_INLINE void BX_CPU_C::clear_##name() { \
BX_CPP_INLINE void BX_CPU_C::clear_TF() { \
BX_CPU_THIS_PTR eflags &= ~(1<<bitnum); \
} \
BX_CPP_INLINE void BX_CPU_C::set_##name(bx_bool val) { \
BX_CPP_INLINE void BX_CPU_C::set_TF(bx_bool val) { \
if (val) BX_CPU_THIS_PTR async_event = 1; \
BX_CPU_THIS_PTR eflags = \
(BX_CPU_THIS_PTR eflags&~(1<<bitnum))|((val)<<bitnum); \
@ -1090,15 +1099,17 @@ public: // for now...
#define BX_ACTIVITY_STATE_MWAIT_IF (5)
unsigned activity_state;
#define BX_EVENT_NMI (1<<0)
#define BX_EVENT_SMI (1<<1)
#define BX_EVENT_INIT (1<<2)
#define BX_EVENT_CODE_BREAKPOINT_ASSIST (1<<3)
#define BX_EVENT_VMX_PREEMPTION_TIMER_EXPIRED (1<<4)
#define BX_EVENT_VMX_INTERRUPT_WINDOW_EXITING (1<<5)
#define BX_EVENT_VMX_NMI_WINDOW_EXITING (1<<6)
#define BX_EVENT_SVM_VIRQ_PENDING (1<<7)
// later the event list will grow rapidly
#define BX_EVENT_NMI (1 << 0)
#define BX_EVENT_SMI (1 << 1)
#define BX_EVENT_INIT (1 << 2)
#define BX_EVENT_CODE_BREAKPOINT_ASSIST (1 << 3)
#define BX_EVENT_VMX_PREEMPTION_TIMER_EXPIRED (1 << 4)
#define BX_EVENT_VMX_INTERRUPT_WINDOW_EXITING (1 << 5)
#define BX_EVENT_VMX_NMI_WINDOW_EXITING (1 << 6)
#define BX_EVENT_SVM_VIRQ_PENDING (1 << 7)
#define BX_EVENT_PENDING_VMX_VIRTUAL_INTR (1 << 8)
#define BX_EVENT_PENDING_INTR (1 << 9)
#define BX_EVENT_PENDING_LAPIC_INTR (1 << 10)
Bit32u pending_event;
Bit32u event_mask;
Bit32u async_event;
@ -1124,13 +1135,6 @@ public: // for now...
return (BX_CPU_THIS_PTR event_mask & event) != 0;
}
BX_SMF BX_CPP_INLINE bx_bool is_pending(void) {
return (BX_CPU_THIS_PTR pending_event) != 0;
}
BX_SMF BX_CPP_INLINE bx_bool is_unmasked_event_pending() {
return (BX_CPU_THIS_PTR pending_event & ~BX_CPU_THIS_PTR event_mask) != 0;
}
BX_SMF BX_CPP_INLINE bx_bool is_pending(Bit32u event) {
return (BX_CPU_THIS_PTR pending_event & event) != 0;
}
@ -1138,6 +1142,10 @@ public: // for now...
return (BX_CPU_THIS_PTR pending_event & ~BX_CPU_THIS_PTR event_mask & event) != 0;
}
BX_SMF BX_CPP_INLINE Bit32u unmasked_events_pending(void) {
return (BX_CPU_THIS_PTR pending_event & ~BX_CPU_THIS_PTR event_mask);
}
#define BX_ASYNC_EVENT_STOP_TRACE (1<<31)
#if BX_X86_DEBUGGER
@ -1146,7 +1154,6 @@ public: // for now...
bx_bool in_smm;
unsigned cpu_mode;
bx_bool user_pl;
bx_bool INTR;
#if BX_CPU_LEVEL >= 5
bx_bool ignore_bad_msrs;
#endif
@ -3564,8 +3571,6 @@ public: // for now...
// <TAG-CLASS-CPU-END>
#if BX_DEBUGGER
BX_SMF void dbg_take_irq(void);
BX_SMF void dbg_force_interrupt(unsigned vector);
BX_SMF void dbg_take_dma(void);
BX_SMF bx_bool dbg_set_reg(unsigned reg, Bit32u val);
BX_SMF bx_bool dbg_get_sreg(bx_dbg_sreg_t *sreg, unsigned sreg_no);
@ -3597,7 +3602,6 @@ public: // for now...
#endif
BX_SMF bx_bool handleAsyncEvent(void);
BX_SMF bx_bool handleWaitForEvent(void);
BX_SMF bx_bool interrupts_enabled(void);
BX_SMF void InterruptAcknowledge(void);
BX_SMF int fetchDecode32(const Bit8u *fetchPtr, bxInstruction_c *i, unsigned remainingInPage) BX_CPP_AttrRegparmN(3);
@ -4014,6 +4018,7 @@ public: // for now...
BX_SMF void shutdown(void);
BX_SMF void handleCpuModeChange(void);
BX_SMF void handleCpuContextChange(void);
BX_SMF void handleInterruptMaskChange(void);
#if BX_CPU_LEVEL >= 4
BX_SMF void handleAlignmentCheck(void);
#endif
@ -4781,20 +4786,20 @@ IMPLEMENT_EFLAG_ACCESSOR (DF, 10)
IMPLEMENT_EFLAG_ACCESSOR (IF, 9)
IMPLEMENT_EFLAG_ACCESSOR (TF, 8)
IMPLEMENT_EFLAG_SET_ACCESSOR (ID, 21)
IMPLEMENT_EFLAG_SET_ACCESSOR (VIP, 20)
IMPLEMENT_EFLAG_SET_ACCESSOR (VIF, 19)
IMPLEMENT_EFLAG_SET_ACCESSOR (ID, 21)
IMPLEMENT_EFLAG_SET_ACCESSOR (VIP, 20)
IMPLEMENT_EFLAG_SET_ACCESSOR (VIF, 19)
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
IMPLEMENT_EFLAG_SET_ACCESSOR_AC ( 18)
IMPLEMENT_EFLAG_SET_ACCESSOR_AC( 18)
#else
IMPLEMENT_EFLAG_SET_ACCESSOR (AC, 18)
IMPLEMENT_EFLAG_SET_ACCESSOR (AC, 18)
#endif
IMPLEMENT_EFLAG_SET_ACCESSOR_VM ( 17)
IMPLEMENT_EFLAG_SET_ACCESSOR_RF ( 16)
IMPLEMENT_EFLAG_SET_ACCESSOR (NT, 14)
IMPLEMENT_EFLAG_SET_ACCESSOR (DF, 10)
IMPLEMENT_EFLAG_SET_ACCESSOR_IF_TF(IF, 9)
IMPLEMENT_EFLAG_SET_ACCESSOR_IF_TF(TF, 8)
IMPLEMENT_EFLAG_SET_ACCESSOR_VM( 17)
IMPLEMENT_EFLAG_SET_ACCESSOR_RF( 16)
IMPLEMENT_EFLAG_SET_ACCESSOR (NT, 14)
IMPLEMENT_EFLAG_SET_ACCESSOR (DF, 10)
IMPLEMENT_EFLAG_SET_ACCESSOR_IF( 9)
IMPLEMENT_EFLAG_SET_ACCESSOR_TF( 8)
#define BX_TASK_FROM_CALL 0
#define BX_TASK_FROM_IRET 1

View File

@ -1315,6 +1315,7 @@ void BX_CPU_C::WriteCR8(bxInstruction_c *i, bx_address val)
#if BX_SUPPORT_SVM
if (BX_CPU_THIS_PTR in_svm_guest) {
SVM_V_TPR = tpr;
handleInterruptMaskChange();
if (SVM_V_INTR_MASKING) return;
}
#endif

View File

@ -33,7 +33,7 @@ bx_bool BX_CPU_C::handleWaitForEvent(void)
// an interrupt wakes up the CPU.
while (1)
{
if ((BX_CPU_INTR && (BX_CPU_THIS_PTR get_IF() || BX_CPU_THIS_PTR activity_state == BX_ACTIVITY_STATE_MWAIT_IF)) ||
if ((is_pending(BX_EVENT_PENDING_INTR | BX_EVENT_PENDING_LAPIC_INTR) && (BX_CPU_THIS_PTR get_IF() || BX_CPU_THIS_PTR activity_state == BX_ACTIVITY_STATE_MWAIT_IF)) ||
is_pending(BX_EVENT_NMI | BX_EVENT_SMI | BX_EVENT_INIT | BX_EVENT_VMX_PREEMPTION_TIMER_EXPIRED | BX_EVENT_VMX_NMI_WINDOW_EXITING))
{
// interrupt ends the HALT condition
@ -87,14 +87,6 @@ bx_bool BX_CPU_C::handleWaitForEvent(void)
return 0;
}
BX_CPP_INLINE bx_bool BX_CPU_C::interrupts_enabled(void)
{
#if BX_SUPPORT_SVM
if (BX_CPU_THIS_PTR in_svm_guest && SVM_V_INTR_MASKING) return SVM_HOST_IF;
#endif
return BX_CPU_THIS_PTR get_IF();
}
void BX_CPU_C::InterruptAcknowledge(void)
{
Bit8u vector;
@ -111,7 +103,7 @@ void BX_CPU_C::InterruptAcknowledge(void)
// NOTE: similar code in ::take_irq()
#if BX_SUPPORT_APIC
if (BX_CPU_THIS_PTR lapic.INTR)
if (is_pending(BX_EVENT_PENDING_LAPIC_INTR))
vector = BX_CPU_THIS_PTR lapic.acknowledge_int();
else
#endif
@ -283,18 +275,12 @@ bx_bool BX_CPU_C::handleAsyncEvent(void)
VMexit(VMX_VMEXIT_INTERRUPT_WINDOW, 0);
}
#endif
else if (BX_CPU_INTR && BX_DBG_ASYNC_INTR &&
(interrupts_enabled()
#if BX_SUPPORT_VMX
|| (BX_CPU_THIS_PTR in_vmx_guest && PIN_VMEXIT(VMX_VM_EXEC_CTRL1_EXTERNAL_INTERRUPT_VMEXIT))
#endif
))
else if (is_unmasked_event_pending(BX_EVENT_PENDING_INTR | BX_EVENT_PENDING_LAPIC_INTR))
{
InterruptAcknowledge();
}
#if BX_SUPPORT_SVM
else if (is_unmasked_event_pending(BX_EVENT_SVM_VIRQ_PENDING) && BX_CPU_THIS_PTR get_IF() &&
((SVM_V_INTR_PRIO > SVM_V_TPR) || SVM_V_IGNORE_TPR))
else if (is_unmasked_event_pending(BX_EVENT_SVM_VIRQ_PENDING))
{
// virtual interrupt acknowledge
VirtualInterruptAcknowledge();
@ -338,15 +324,9 @@ bx_bool BX_CPU_C::handleAsyncEvent(void)
// Alignment check
// (handled by rest of the code)
if (!((BX_CPU_INTR && interrupts_enabled() && SVM_GIF) ||
BX_CPU_THIS_PTR debug_trap ||
if (!((SVM_GIF && unmasked_events_pending()) || BX_CPU_THIS_PTR debug_trap ||
// BX_CPU_THIS_PTR get_TF() || // implies debug_trap is set
BX_HRQ ||
#if BX_SUPPORT_SVM
(is_unmasked_event_pending(BX_EVENT_SVM_VIRQ_PENDING) && BX_CPU_THIS_PTR get_IF() &&
((SVM_V_INTR_PRIO > SVM_V_TPR) || SVM_V_IGNORE_TPR)) ||
#endif
is_unmasked_event_pending(~BX_EVENT_SVM_VIRQ_PENDING)))
BX_HRQ))
{
BX_CPU_THIS_PTR async_event = 0;
}
@ -400,45 +380,16 @@ void BX_CPU_C::deliver_SMI(void)
void BX_CPU_C::raise_INTR(void)
{
BX_CPU_THIS_PTR INTR = 1;
BX_CPU_THIS_PTR async_event = 1;
signal_event(BX_EVENT_PENDING_INTR);
}
void BX_CPU_C::clear_INTR(void)
{
BX_CPU_THIS_PTR INTR = 0;
clear_event(BX_EVENT_PENDING_INTR);
}
#if BX_DEBUGGER
void BX_CPU_C::dbg_take_irq(void)
{
// NOTE: similar code in ::cpu_loop()
if (BX_CPU_INTR && BX_CPU_THIS_PTR get_IF()) {
if (setjmp(BX_CPU_THIS_PTR jmp_buf_env) == 0) {
// normal return from setjmp setup
unsigned vector = DEV_pic_iac(); // may set INTR with next interrupt
BX_CPU_THIS_PTR EXT = 1; // external event
BX_CPU_THIS_PTR async_event = 1; // set in case INTR is triggered
interrupt(vector, BX_EXTERNAL_INTERRUPT, 0, 0);
}
}
}
void BX_CPU_C::dbg_force_interrupt(unsigned vector)
{
// Used to force simulator to take an interrupt, without
// regard to IF
if (setjmp(BX_CPU_THIS_PTR jmp_buf_env) == 0) {
// normal return from setjmp setup
BX_CPU_THIS_PTR EXT = 1; // external event
BX_CPU_THIS_PTR async_event = 1; // probably don't need this
interrupt(vector, BX_EXTERNAL_INTERRUPT, 0, 0);
}
}
void BX_CPU_C::dbg_take_dma(void)
{
// NOTE: similar code in ::cpu_loop()

View File

@ -142,7 +142,6 @@ BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::STI(bxInstruction_c *i)
if (! BX_CPU_THIS_PTR get_IF()) {
BX_CPU_THIS_PTR assert_IF();
inhibit_interrupts(BX_INHIBIT_INTERRUPTS);
BX_CPU_THIS_PTR async_event = 1;
}
BX_NEXT_INSTR(i);

View File

@ -25,35 +25,38 @@
#include "cpu.h"
#define LOG_THIS BX_CPU_THIS_PTR
// Load a complete new EFLAGS value (e.g. POPF/IRET/task switch path) and
// dispatch every side effect that depends on an individual flag bit:
//   RF  -> prefetch queue must be invalidated
//   TF  -> async_event raised so the trap is taken after the next instruction
//   IF  -> event masks recomputed via handleInterruptMaskChange()
//   AC  -> alignment-check state recomputed (CPU level >= 4)
//   VM  -> CPU mode recomputed when the bit actually changed
// NOTE(review): this span was reconstructed from a flattened diff that
// contained both the old and the new statement lines; the duplicated old
// lines (stale 'val' references, second eflags assignment, duplicate
// signature) have been removed.
void BX_CPP_AttrRegparmN(1) BX_CPU_C::setEFlags(Bit32u new_eflags)
{
  Bit32u eflags = BX_CPU_THIS_PTR eflags; // remember previous value for change detection

  // VM flag could not be set from long mode
#if BX_SUPPORT_X86_64
  if (long_mode()) {
    if (BX_CPU_THIS_PTR get_VM()) BX_PANIC(("VM is set in long mode !"));
    new_eflags &= ~EFlagsVMMask;
  }
#endif

  BX_CPU_THIS_PTR eflags = new_eflags;
  setEFlagsOSZAPC(new_eflags); // update lazy flags state

  if (BX_CPU_THIS_PTR get_RF()) invalidate_prefetch_q();

  if (BX_CPU_THIS_PTR get_TF()) {
    BX_CPU_THIS_PTR async_event = 1; // TF == 1
  }

  // re-derive event masking only when EFLAGS.IF actually changed
  if ((eflags ^ new_eflags) & EFlagsIFMask) {
    handleInterruptMaskChange();
  }

#if BX_CPU_LEVEL >= 4
  handleAlignmentCheck(/* EFLAGS.AC reloaded */);
#endif

  if ((eflags ^ new_eflags) & EFlagsVMMask) {
    handleCpuModeChange(); // VM flag was changed
  }
}
void BX_CPP_AttrRegparmN(2)
@ -109,3 +112,51 @@ Bit32u BX_CPU_C::force_flags(void)
return BX_CPU_THIS_PTR eflags;
}
// Recompute the CPU event mask after anything that can change effective
// interrupt blocking: EFLAGS.IF writes, entering/leaving a VMX/SVM guest,
// or SVM V_TPR/V_INTR_PRIO updates (see WriteCR8 caller in this commit).
// The pending/masked event machinery replaces the old INTR/async_event
// flags: events stay pending; only their masked status changes here.
void BX_CPU_C::handleInterruptMaskChange(void)
{
  if (BX_CPU_THIS_PTR get_IF()) {
    // EFLAGS.IF was set, unmask all affected events
    unmask_event(BX_EVENT_VMX_INTERRUPT_WINDOW_EXITING |
                 BX_EVENT_PENDING_INTR |
                 BX_EVENT_PENDING_LAPIC_INTR |
                 BX_EVENT_PENDING_VMX_VIRTUAL_INTR);
#if BX_SUPPORT_SVM
    if (BX_CPU_THIS_PTR in_svm_guest) {
      // virtual interrupt is deliverable only if its priority beats the
      // virtual TPR, or if TPR checking is explicitly ignored
      if ((SVM_V_INTR_PRIO > SVM_V_TPR) || SVM_V_IGNORE_TPR)
        unmask_event(BX_EVENT_SVM_VIRQ_PENDING);
    }
#endif
    return;
  }

  // EFLAGS.IF was cleared, some events like INTR would be masked
#if BX_SUPPORT_VMX
  if (BX_CPU_THIS_PTR in_vmx_guest && PIN_VMEXIT(VMX_VM_EXEC_CTRL1_EXTERNAL_INTERRUPT_VMEXIT)) {
    // if 'External-interrupt exiting' control is set, the value of EFLAGS.IF
    // doesn't affect interrupt blocking
    // (note: BX_EVENT_PENDING_INTR / BX_EVENT_PENDING_LAPIC_INTR are left
    //  unmasked in this case — external interrupts still cause a VMexit)
    mask_event(BX_EVENT_VMX_INTERRUPT_WINDOW_EXITING | BX_EVENT_PENDING_VMX_VIRTUAL_INTR);
    return;
  }
#endif

#if BX_SUPPORT_SVM
  if (BX_CPU_THIS_PTR in_svm_guest && SVM_V_INTR_MASKING) {
    // with V_INTR_MASKING the guest IF controls virtual interrupts only;
    // physical INTR masking follows the host IF snapshot
    if (! SVM_HOST_IF)
      mask_event(BX_EVENT_PENDING_INTR | BX_EVENT_PENDING_LAPIC_INTR);
    mask_event(BX_EVENT_SVM_VIRQ_PENDING);
  }
  else
#endif
  {
    // plain case: IF=0 masks every interrupt-delivery event
    mask_event(BX_EVENT_VMX_INTERRUPT_WINDOW_EXITING |
               BX_EVENT_PENDING_INTR |
               BX_EVENT_PENDING_LAPIC_INTR |
               BX_EVENT_PENDING_VMX_VIRTUAL_INTR |
               BX_EVENT_SVM_VIRQ_PENDING);
  }
}

View File

@ -187,8 +187,6 @@ static bx_cpuid_t *cpuid_factory(BX_CPU_C *cpu)
// BX_CPU_C constructor
void BX_CPU_C::initialize(void)
{
BX_CPU_THIS_PTR clear_INTR();
#if BX_CPU_LEVEL >= 4
BX_CPU_THIS_PTR cpuid = cpuid_factory(this);
if (! BX_CPU_THIS_PTR cpuid)
@ -627,7 +625,6 @@ void BX_CPU_C::register_state(void)
BXRS_HEX_PARAM_SIMPLE32(cpu, pending_event);
BXRS_HEX_PARAM_SIMPLE32(cpu, event_mask);
BXRS_HEX_PARAM_SIMPLE32(cpu, async_event);
BXRS_PARAM_BOOL(cpu, INTR, INTR);
#if BX_X86_DEBUGGER
BXRS_PARAM_BOOL(cpu, in_repeat, in_repeat);

View File

@ -451,10 +451,15 @@ void BX_CPU_C::handleCpuContextChange(void)
invalidate_prefetch_q();
invalidate_stack_cache();
handleInterruptMaskChange();
#if BX_CPU_LEVEL >= 4
handleAlignmentCheck();
#endif
handleCpuModeChange();
#if BX_CPU_LEVEL >= 6
handleSseModeChange();
#if BX_SUPPORT_AVX