/////////////////////////////////////////////////////////////////////////
// $Id$
/////////////////////////////////////////////////////////////////////////
//
// Copyright (C) 2001-2009 The Bochs Project
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 2 of the License, or (at your option) any later version.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
/////////////////////////////////////////////////////////////////////////

#define NEED_CPU_REG_SHORTCUTS 1
#include "bochs.h"
#include "cpu.h"
#define LOG_THIS BX_CPU_THIS_PTR

#include "iodev/iodev.h"

// Make code more tidy with a few macros.
#if BX_SUPPORT_X86_64==0
#define RIP EIP
#define RCX ECX
#endif

#define InstrumentICACHE 0

#if InstrumentICACHE
static unsigned iCacheLookups=0;
static unsigned iCacheMisses=0;

#define InstrICache_StatsMask 0xffffff

#define InstrICache_Stats() {\
  if ((iCacheLookups & InstrICache_StatsMask) == 0) { \
    BX_INFO(("ICACHE lookups: %u, misses: %u, hit rate = %6.2f%% ", \
          iCacheLookups, \
          iCacheMisses,  \
          (iCacheLookups-iCacheMisses) * 100.0 / iCacheLookups)); \
    iCacheLookups = iCacheMisses = 0; \
  } \
}
#define InstrICache_Increment(v) (v)++
#else
#define InstrICache_Stats()
#define InstrICache_Increment(v)
#endif

// The CHECK_MAX_INSTRUCTIONS macro allows cpu_loop to execute a few
// instructions and then return so that the other processors have a chance to
// run.  This is used by the bochs internal debugger or when simulating
// multiple processors.
//
// If maximum instructions have been executed, return. The zero-count
// means run forever.
#if BX_SUPPORT_SMP || BX_DEBUGGER
  #define CHECK_MAX_INSTRUCTIONS(count) \
    if ((count) > 0) {                  \
      (count)--;                        \
      if ((count) == 0) return;         \
    }
#else
  #define CHECK_MAX_INSTRUCTIONS(count)
#endif
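
// cpu_loop() below is the top-level simulation loop for one virtual CPU: it
// services async_event, fetches the next instruction (or trace) through
// getICacheEntry(), executes it, ticks the clock, and repeats until an event
// or the max_instr_count quantum forces a return to the caller.  The call
// site is configuration dependent; very roughly, the simulator drives it as
//
//   BX_CPU(cpu)->cpu_loop(quantum);  // illustrative sketch, not the exact API
//
// where a quantum of 0 means "run until an event forces a return".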
void BX_CPU_C::cpu_loop(Bit32u max_instr_count)
{
#if BX_DEBUGGER
  BX_CPU_THIS_PTR break_point = 0;
  BX_CPU_THIS_PTR magic_break = 0;
  BX_CPU_THIS_PTR stop_reason = STOP_NO_REASON;
#endif

  if (setjmp(BX_CPU_THIS_PTR jmp_buf_env)) {
    // only from exception function we can get here ...
    BX_INSTR_NEW_INSTRUCTION(BX_CPU_ID);
    BX_TICK1_IF_SINGLE_PROCESSOR();
#if BX_DEBUGGER || BX_GDBSTUB
    if (dbg_instruction_epilog()) return;
#endif
    CHECK_MAX_INSTRUCTIONS(max_instr_count);
#if BX_GDBSTUB
    if (bx_dbg.gdbstub_enabled) return;
#endif
  }

  // If the exception() routine has encountered a nasty fault scenario,
  // the debugger may request that control is returned to it so that
  // the situation may be examined.
#if BX_DEBUGGER
  if (bx_guard.interrupt_requested) return;
#endif

  // We get here either by a normal function call, or by a longjmp
  // back from an exception() call.  In either case, commit the
  // new EIP/ESP, and set up other environmental fields.  This code
  // mirrors similar code below, after the interrupt() call.
  BX_CPU_THIS_PTR prev_rip = RIP; // commit new EIP
  BX_CPU_THIS_PTR speculative_rsp = 0;
  BX_CPU_THIS_PTR EXT = 0;

  while (1) {

    // check on events which occurred for previous instructions (traps)
    // and ones which are asynchronous to the CPU (hardware interrupts)
    if (BX_CPU_THIS_PTR async_event) {
      if (handleAsyncEvent()) {
        // If request to return to caller ASAP.
        return;
      }
    }

    bxICacheEntry_c *entry = getICacheEntry();
    bxInstruction_c *i = entry->i;

#if BX_SUPPORT_TRACE_CACHE
    bxInstruction_c *last = i + (entry->tlen);

    for(;;) {
#endif

#if BX_DISASM
      if (BX_CPU_THIS_PTR trace) {
        // print the instruction that is about to be executed
        debug_disasm_instruction(BX_CPU_THIS_PTR prev_rip);
      }
#endif

      // instruction decoding completed -> continue with execution
      // want to allow changing of the instruction inside instrumentation callback
      BX_INSTR_BEFORE_EXECUTION(BX_CPU_ID, i);
      RIP += i->ilen();
      BX_CPU_CALL_METHOD(i->execute, (i)); // might iterate repeat instruction
      BX_CPU_THIS_PTR prev_rip = RIP; // commit new RIP
      BX_INSTR_AFTER_EXECUTION(BX_CPU_ID, i);
      BX_TICK1_IF_SINGLE_PROCESSOR();

      // inform instrumentation about new instruction
      BX_INSTR_NEW_INSTRUCTION(BX_CPU_ID);

      // note instructions generating exceptions never reach this point
#if BX_DEBUGGER || BX_GDBSTUB
      if (dbg_instruction_epilog()) return;
#endif

      CHECK_MAX_INSTRUCTIONS(max_instr_count);

#if BX_SUPPORT_TRACE_CACHE
      if (BX_CPU_THIS_PTR async_event) {
        // clear stop trace magic indication that probably was set by repeat or branch32/64
        BX_CPU_THIS_PTR async_event &= ~BX_ASYNC_EVENT_STOP_TRACE;
        break;
      }

      if (++i == last) {
        entry = getICacheEntry();
        i = entry->i;
        last = i + (entry->tlen);
      }
    }
#endif
  }  // while (1)
}
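
// getICacheEntry() returns the instruction-cache entry that covers the
// current RIP.  If RIP has moved outside the page window established by
// prefetch(), the window is refreshed first; on a miss (no validated entry
// for this physical address and fetch mode) the entry is rebuilt by
// serveICacheMiss().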
bxICacheEntry_c* BX_CPU_C::getICacheEntry(void)
{
  bx_address eipBiased = RIP + BX_CPU_THIS_PTR eipPageBias;

  if (eipBiased >= BX_CPU_THIS_PTR eipPageWindowSize) {
    prefetch();
    eipBiased = RIP + BX_CPU_THIS_PTR eipPageBias;
  }

  bx_phy_address pAddr = BX_CPU_THIS_PTR pAddrPage + eipBiased;
  bxICacheEntry_c *entry = BX_CPU_THIS_PTR iCache.get_entry(pAddr, BX_CPU_THIS_PTR fetchModeMask);

  InstrICache_Increment(iCacheLookups);
  InstrICache_Stats();

  if (entry->pAddr != pAddr)
  {
    // iCache miss. No validated instruction with matching fetch parameters
    // is in the iCache.
    InstrICache_Increment(iCacheMisses);
    serveICacheMiss(entry, (Bit32u) eipBiased, pAddr);
  }

  return entry;
}
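
// repeat() implements REP-prefixed string instructions: the handler is
// called once per iteration while the count register (RCX/ECX/CX, selected
// by the effective address size) is non-zero.  When an async event becomes
// pending (or on every iteration if the debugger is compiled in) the loop
// breaks out, RIP is restored to the start of the instruction so it will be
// refetched, and the trace-stop flag is raised so pending events can be
// serviced between iterations.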
void BX_CPP_AttrRegparmN(2) BX_CPU_C::repeat(bxInstruction_c *i, BxExecutePtr_tR execute)
{
  // non repeated instruction
  if (! i->repUsedL()) {
    BX_CPU_CALL_METHOD(execute, (i));
    return;
  }

#if BX_X86_DEBUGGER
  BX_CPU_THIS_PTR in_repeat = 0;
#endif

#if BX_SUPPORT_X86_64
  if (i->as64L()) {
    while(1) {
      if (RCX != 0) {
        BX_CPU_CALL_METHOD(execute, (i));
        BX_INSTR_REPEAT_ITERATION(BX_CPU_ID, i);
        RCX --;
      }
      if (RCX == 0) return;

#if BX_DEBUGGER == 0
      if (BX_CPU_THIS_PTR async_event)
#endif
        break; // exit always if debugger enabled

      BX_TICK1_IF_SINGLE_PROCESSOR();
    }
  }
  else
#endif
  if (i->as32L()) {
    while(1) {
      if (ECX != 0) {
        BX_CPU_CALL_METHOD(execute, (i));
        BX_INSTR_REPEAT_ITERATION(BX_CPU_ID, i);
        RCX = ECX - 1;
      }
      if (ECX == 0) return;

#if BX_DEBUGGER == 0
      if (BX_CPU_THIS_PTR async_event)
#endif
        break; // exit always if debugger enabled

      BX_TICK1_IF_SINGLE_PROCESSOR();
    }
  }
  else  // 16bit addrsize
  {
    while(1) {
      if (CX != 0) {
        BX_CPU_CALL_METHOD(execute, (i));
        BX_INSTR_REPEAT_ITERATION(BX_CPU_ID, i);
        CX --;
      }
      if (CX == 0) return;

#if BX_DEBUGGER == 0
      if (BX_CPU_THIS_PTR async_event)
#endif
        break; // exit always if debugger enabled

      BX_TICK1_IF_SINGLE_PROCESSOR();
    }
  }

#if BX_X86_DEBUGGER
  BX_CPU_THIS_PTR in_repeat = 1;
#endif

  RIP = BX_CPU_THIS_PTR prev_rip; // repeat loop not done, restore RIP

#if BX_SUPPORT_TRACE_CACHE
  // assert magic async_event to stop trace execution
  BX_CPU_THIS_PTR async_event |= BX_ASYNC_EVENT_STOP_TRACE;
#endif
}
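
// repeat_ZF() is the same iteration loop for the conditional string
// instructions (CMPS/SCAS): a rep value of 3 corresponds to the 0xF3
// (REPE/REPZ) prefix, which also terminates once ZF is clear, while the 0xF2
// (REPNE/REPNZ) prefix terminates once ZF is set.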
void BX_CPP_AttrRegparmN(2) BX_CPU_C::repeat_ZF(bxInstruction_c *i, BxExecutePtr_tR execute)
{
  unsigned rep = i->repUsedValue();

  // non repeated instruction
  if (! rep) {
    BX_CPU_CALL_METHOD(execute, (i));
    return;
  }

#if BX_X86_DEBUGGER
  BX_CPU_THIS_PTR in_repeat = 0;
#endif

  if (rep == 3) { /* repeat prefix 0xF3 */
#if BX_SUPPORT_X86_64
    if (i->as64L()) {
      while(1) {
        if (RCX != 0) {
          BX_CPU_CALL_METHOD(execute, (i));
          BX_INSTR_REPEAT_ITERATION(BX_CPU_ID, i);
          RCX --;
        }
        if (! get_ZF() || RCX == 0) return;

#if BX_DEBUGGER == 0
        if (BX_CPU_THIS_PTR async_event)
#endif
          break; // exit always if debugger enabled

        BX_TICK1_IF_SINGLE_PROCESSOR();
      }
    }
    else
#endif
    if (i->as32L()) {
      while(1) {
        if (ECX != 0) {
          BX_CPU_CALL_METHOD(execute, (i));
          BX_INSTR_REPEAT_ITERATION(BX_CPU_ID, i);
          RCX = ECX - 1;
        }
        if (! get_ZF() || ECX == 0) return;

#if BX_DEBUGGER == 0
        if (BX_CPU_THIS_PTR async_event)
#endif
          break; // exit always if debugger enabled

        BX_TICK1_IF_SINGLE_PROCESSOR();
      }
    }
    else  // 16bit addrsize
    {
      while(1) {
        if (CX != 0) {
          BX_CPU_CALL_METHOD(execute, (i));
          BX_INSTR_REPEAT_ITERATION(BX_CPU_ID, i);
          CX --;
        }
        if (! get_ZF() || CX == 0) return;

#if BX_DEBUGGER == 0
        if (BX_CPU_THIS_PTR async_event)
#endif
          break; // exit always if debugger enabled

        BX_TICK1_IF_SINGLE_PROCESSOR();
      }
    }
  }
  else { /* repeat prefix 0xF2 */
#if BX_SUPPORT_X86_64
    if (i->as64L()) {
      while(1) {
        if (RCX != 0) {
          BX_CPU_CALL_METHOD(execute, (i));
          BX_INSTR_REPEAT_ITERATION(BX_CPU_ID, i);
          RCX --;
        }
        if (get_ZF() || RCX == 0) return;

#if BX_DEBUGGER == 0
        if (BX_CPU_THIS_PTR async_event)
#endif
          break; // exit always if debugger enabled

        BX_TICK1_IF_SINGLE_PROCESSOR();
      }
    }
    else
#endif
    if (i->as32L()) {
      while(1) {
        if (ECX != 0) {
          BX_CPU_CALL_METHOD(execute, (i));
          BX_INSTR_REPEAT_ITERATION(BX_CPU_ID, i);
          RCX = ECX - 1;
        }
        if (get_ZF() || ECX == 0) return;

#if BX_DEBUGGER == 0
        if (BX_CPU_THIS_PTR async_event)
#endif
          break; // exit always if debugger enabled

        BX_TICK1_IF_SINGLE_PROCESSOR();
      }
    }
    else  // 16bit addrsize
    {
      while(1) {
        if (CX != 0) {
          BX_CPU_CALL_METHOD(execute, (i));
          BX_INSTR_REPEAT_ITERATION(BX_CPU_ID, i);
          CX --;
        }
        if (get_ZF() || CX == 0) return;

#if BX_DEBUGGER == 0
        if (BX_CPU_THIS_PTR async_event)
#endif
          break; // exit always if debugger enabled

        BX_TICK1_IF_SINGLE_PROCESSOR();
      }
    }
  }

#if BX_X86_DEBUGGER
  BX_CPU_THIS_PTR in_repeat = 1;
#endif

  RIP = BX_CPU_THIS_PTR prev_rip; // repeat loop not done, restore RIP

#if BX_SUPPORT_TRACE_CACHE
  // assert magic async_event to stop trace execution
  BX_CPU_THIS_PTR async_event |= BX_ASYNC_EVENT_STOP_TRACE;
#endif
}
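
// handleAsyncEvent() is called from cpu_loop() whenever async_event is set.
// It first deals with the halted/MWAIT activity states, then services
// pending events in the priority order documented in the comments below
// (task-switch trap, SMI/INIT, debug traps, NMI, external interrupts, DMA).
// It returns 1 when cpu_loop() should return to its caller and 0 when
// execution may continue.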
unsigned BX_CPU_C::handleAsyncEvent(void)
{
  //
  // This area is where we process special conditions and events.
  //
  if (BX_CPU_THIS_PTR activity_state) {
    // For one processor, pass the time as quickly as possible until
    // an interrupt wakes up the CPU.
    while (1)
    {
      if ((BX_CPU_INTR && (BX_CPU_THIS_PTR get_IF() ||
          (BX_CPU_THIS_PTR activity_state == BX_ACTIVITY_STATE_MWAIT_IF))) ||
           BX_CPU_THIS_PTR pending_NMI || BX_CPU_THIS_PTR pending_SMI || BX_CPU_THIS_PTR pending_INIT)
      {
        // interrupt ends the HALT condition
#if BX_SUPPORT_MONITOR_MWAIT
        if (BX_CPU_THIS_PTR activity_state >= BX_ACTIVITY_STATE_MWAIT)
          BX_CPU_THIS_PTR monitor.reset_monitor();
#endif
        BX_CPU_THIS_PTR activity_state = 0;
        BX_CPU_THIS_PTR inhibit_mask = 0; // clear inhibits for after resume
        break;
      }

      if (BX_CPU_THIS_PTR activity_state == BX_ACTIVITY_STATE_ACTIVE) {
        BX_INFO(("handleAsyncEvent: reset detected in HLT state"));
        break;
      }

      if (BX_HRQ && BX_DBG_ASYNC_DMA) {
        // handle DMA also when CPU is halted
        DEV_dma_raise_hlda();
      }

      // for multiprocessor simulation, even if this CPU is halted we still
      // must give the others a chance to simulate.  If an interrupt has
      // arrived, then clear the HALT condition; otherwise just return from
      // the CPU loop with stop_reason STOP_CPU_HALTED.
#if BX_SUPPORT_SMP
      if (BX_SMP_PROCESSORS > 1) {
        // HALT condition remains, return so other CPUs have a chance
#if BX_DEBUGGER
        BX_CPU_THIS_PTR stop_reason = STOP_CPU_HALTED;
#endif
        return 1; // Return to caller of cpu_loop.
      }
#endif

#if BX_DEBUGGER
      if (bx_guard.interrupt_requested)
        return 1; // Return to caller of cpu_loop.
#endif

      BX_TICKN(10); // when in HLT run time faster for single CPU
    }
  } else if (bx_pc_system.kill_bochs_request) {
    // setting kill_bochs_request causes the cpu loop to return ASAP.
    return 1; // Return to caller of cpu_loop.
  }

  // VMLAUNCH/VMRESUME cannot be executed with interrupts inhibited.
  // Save inhibit interrupts state into shadow bits after clearing
  BX_CPU_THIS_PTR inhibit_mask = (BX_CPU_THIS_PTR inhibit_mask << 2) & 0xF;

  // Priority 1: Hardware Reset and Machine Checks
  //   RESET
  //   Machine Check
  // (bochs doesn't support these)

  // Priority 2: Trap on Task Switch
  //   T flag in TSS is set
  if (BX_CPU_THIS_PTR debug_trap & BX_DEBUG_TRAP_TASK_SWITCH_BIT)
    exception(BX_DB_EXCEPTION, 0); // no error, not interrupt

  // Priority 3: External Hardware Interventions
  //   FLUSH
  //   STOPCLK
  //   SMI
  //   INIT
  if (BX_CPU_THIS_PTR pending_SMI && ! BX_CPU_THIS_PTR smm_mode())
  {
    // clear SMI pending flag and disable NMI when SMM was accepted
    BX_CPU_THIS_PTR pending_SMI = 0;
    enter_system_management_mode();
  }

  if (BX_CPU_THIS_PTR pending_INIT && ! BX_CPU_THIS_PTR disable_INIT) {
#if BX_SUPPORT_VMX
    if (BX_CPU_THIS_PTR in_vmx_guest) {
      BX_ERROR(("VMEXIT: INIT pin asserted"));
      VMexit(0, VMX_VMEXIT_INIT, 0);
    }
#endif
    // reset will clear pending INIT
    BX_CPU_THIS_PTR reset(BX_RESET_SOFTWARE);
  }

  // Priority 4: Traps on Previous Instruction
  //   Breakpoints
  //   Debug Trap Exceptions (TF flag set or data/IO breakpoint)
  if (BX_CPU_THIS_PTR debug_trap &&
      !(BX_CPU_THIS_PTR inhibit_mask & BX_INHIBIT_DEBUG_SHADOW))
  {
    // A trap may be inhibited on this boundary due to an instruction
    // which loaded SS.  If so we clear the inhibit_mask below
    // and don't execute this code until the next boundary.
    exception(BX_DB_EXCEPTION, 0); // no error, not interrupt
  }

  // Priority 5: External Interrupts
  //   NMI Interrupts
  //   Maskable Hardware Interrupts
  if (BX_CPU_THIS_PTR inhibit_mask & BX_INHIBIT_INTERRUPTS_SHADOW) {
    // Processing external interrupts is inhibited on this
    // boundary because of certain instructions like STI.
    // inhibit_mask is cleared below, in which case we will have
    // an opportunity to check interrupts on the next instruction
    // boundary.
  }
#if BX_SUPPORT_VMX
  else if (! BX_CPU_THIS_PTR disable_NMI && BX_CPU_THIS_PTR in_vmx_guest &&
       VMEXIT(VMX_VM_EXEC_CTRL2_NMI_WINDOW_VMEXIT))
  {
    // NMI-window exiting
    BX_ERROR(("VMEXIT: NMI window exiting"));
    VMexit(0, VMX_VMEXIT_NMI_WINDOW, 0);
  }
#endif
  else if (BX_CPU_THIS_PTR pending_NMI && ! BX_CPU_THIS_PTR disable_NMI) {
    BX_CPU_THIS_PTR pending_NMI = 0;
    BX_CPU_THIS_PTR disable_NMI = 1;
    BX_CPU_THIS_PTR EXT = 1; /* external event */
#if BX_SUPPORT_VMX
    VMexit_Event(0, BX_NMI, 2, 0, 0);
#endif
    BX_INSTR_HWINTERRUPT(BX_CPU_ID, 2, BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value, RIP);
    interrupt(2, BX_NMI, 0, 0);
  }
#if BX_SUPPORT_VMX
  else if (BX_CPU_THIS_PTR vmx_interrupt_window && BX_CPU_THIS_PTR get_IF()) {
    // interrupt-window exiting
    BX_ERROR(("VMEXIT: interrupt window exiting"));
    VMexit(0, VMX_VMEXIT_INTERRUPT_WINDOW, 0);
  }
#endif
  else if (BX_CPU_INTR && BX_DBG_ASYNC_INTR &&
          (BX_CPU_THIS_PTR get_IF()
#if BX_SUPPORT_VMX
        || (BX_CPU_THIS_PTR in_vmx_guest && PIN_VMEXIT(VMX_VM_EXEC_CTRL1_EXTERNAL_INTERRUPT_VMEXIT))
#endif
          ))
  {
    Bit8u vector;
#if BX_SUPPORT_VMX
    VMexit_ExtInterrupt();
#endif
    // NOTE: similar code in ::take_irq()
#if BX_SUPPORT_APIC
    if (BX_CPU_THIS_PTR lapic.INTR)
      vector = BX_CPU_THIS_PTR lapic.acknowledge_int();
    else
#endif
      // if no local APIC, always acknowledge the PIC.
      vector = DEV_pic_iac(); // may set INTR with next interrupt
    BX_CPU_THIS_PTR EXT = 1; /* external event */
#if BX_SUPPORT_VMX
    VMexit_Event(0, BX_EXTERNAL_INTERRUPT, vector, 0, 0);
#endif
    BX_INSTR_HWINTERRUPT(BX_CPU_ID, vector,
        BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value, RIP);
    interrupt(vector, BX_EXTERNAL_INTERRUPT, 0, 0);
    // Set up environment, as would be when this main cpu loop gets
    // invoked.  At the end of normal instructions, we always commit
    // the new EIP.  But here, we call interrupt() much like
    // it was a software interrupt instruction, and need to effect the
    // commit here.  This code mirrors similar code above.
    BX_CPU_THIS_PTR prev_rip = RIP; // commit new RIP
    BX_CPU_THIS_PTR EXT = 0;
  }
  else if (BX_HRQ && BX_DBG_ASYNC_DMA) {
    // NOTE: similar code in ::take_dma()
    // assert Hold Acknowledge (HLDA) and go into a bus hold state
    DEV_dma_raise_hlda();
  }

  // Priority 6: Faults from fetching next instruction
  //   Code breakpoint fault
  //   Code segment limit violation (priority 7 on 486/Pentium)
  //   Code page fault (priority 7 on 486/Pentium)
  // (handled in main decode loop)

  // Now we can handle things which are synchronous to instruction
  // execution.
  if (BX_CPU_THIS_PTR get_RF()) {
    BX_CPU_THIS_PTR clear_RF();
  }
#if BX_X86_DEBUGGER
  else {
    // only bother comparing if any breakpoints enabled and
    // debug events are not inhibited on this boundary.
    if (! (BX_CPU_THIS_PTR inhibit_mask & BX_INHIBIT_DEBUG_SHADOW) && ! BX_CPU_THIS_PTR in_repeat) {
      code_breakpoint_match(get_laddr(BX_SEG_REG_CS, BX_CPU_THIS_PTR prev_rip));
    }
  }
#endif

  if (BX_CPU_THIS_PTR get_TF())
  {
    // TF is set before execution of next instruction.  Schedule
    // a debug trap (#DB) after execution.  After completion of
    // next instruction, the code above will invoke the trap.
    BX_CPU_THIS_PTR debug_trap |= BX_DEBUG_SINGLE_STEP_BIT;
  }

  // Priority 7: Faults from decoding next instruction
  //   Instruction length > 15 bytes
  //   Illegal opcode
  //   Coprocessor not available
  // (handled in main decode loop etc)

  // Priority 8: Faults on executing an instruction
  //   Floating point execution
  //   Overflow
  //   Bound error
  //   Invalid TSS
  //   Segment not present
  //   Stack fault
  //   General protection
  //   Data page fault
  //   Alignment check
  // (handled by rest of the code)

  if (!((BX_CPU_INTR && BX_CPU_THIS_PTR get_IF()) ||
        BX_CPU_THIS_PTR debug_trap ||
//      BX_CPU_THIS_PTR get_TF() // implies debug_trap is set
        BX_HRQ
#if BX_SUPPORT_VMX
     || BX_CPU_THIS_PTR vmx_interrupt_window || BX_CPU_THIS_PTR inhibit_mask
#endif
#if BX_X86_DEBUGGER
        // any debug code breakpoint is set
     || ((BX_CPU_THIS_PTR dr7 & 0xff) &&
         (((BX_CPU_THIS_PTR dr7 >> 16) & 3) == 0 ||
          ((BX_CPU_THIS_PTR dr7 >> 20) & 3) == 0 ||
          ((BX_CPU_THIS_PTR dr7 >> 24) & 3) == 0 ||
          ((BX_CPU_THIS_PTR dr7 >> 28) & 3) == 0))
#endif
        ))
    BX_CPU_THIS_PTR async_event = 0;

  return 0; // Continue executing cpu_loop.
}
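
// prefetch() re-establishes the instruction fetch window for the current
// CS:EIP (or RIP in 64-bit mode): it computes eipPageBias and
// eipPageWindowSize, resolves the physical page (pAddrPage) through the TLB
// or translate_linear(), and caches a host pointer (eipFetchPtr) that the
// fetch/decode path uses until execution leaves the window.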

// boundaries of consideration:
//
//  * physical memory boundary: 1024k (1Megabyte) (increments of...)
//  * A20 boundary:             1024k (1Megabyte)
//  * page boundary:            4k
//  * ROM boundary:             2k (don't care since we are only reading)
//  * segment boundary:         any

void BX_CPU_C::prefetch(void)
{
  bx_address laddr;
  unsigned pageOffset;

#if BX_SUPPORT_X86_64
  if (long64_mode()) {
    if (! IsCanonical(RIP)) {
      BX_ERROR(("prefetch: #GP(0): RIP crossed canonical boundary"));
      exception(BX_GP_EXCEPTION, 0);
    }

    // linear address is equal to RIP in 64-bit long mode
    pageOffset = PAGE_OFFSET(EIP);
    laddr = RIP;

    // Calculate RIP at the beginning of the page.
    BX_CPU_THIS_PTR eipPageBias = pageOffset - RIP;
    BX_CPU_THIS_PTR eipPageWindowSize = 4096;
  }
  else
#endif
  {
    BX_CLEAR_64BIT_HIGH(BX_64BIT_REG_RIP); /* avoid 32-bit EIP wrap */
    laddr = BX_CPU_THIS_PTR get_laddr32(BX_SEG_REG_CS, EIP);
    pageOffset = PAGE_OFFSET(laddr);

    // Calculate RIP at the beginning of the page.
    BX_CPU_THIS_PTR eipPageBias = (bx_address) pageOffset - EIP;

    Bit32u limit = BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled;
    if (EIP > limit) {
      BX_ERROR(("prefetch: EIP [%08x] > CS.limit [%08x]", EIP, limit));
      exception(BX_GP_EXCEPTION, 0);
    }

    BX_CPU_THIS_PTR eipPageWindowSize = 4096;
    if (limit + BX_CPU_THIS_PTR eipPageBias < 4096) {
      BX_CPU_THIS_PTR eipPageWindowSize = (Bit32u)(limit + BX_CPU_THIS_PTR eipPageBias + 1);
    }
  }

  bx_address lpf = LPFOf(laddr);
  unsigned TLB_index = BX_TLB_INDEX_OF(lpf, 0);
  bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[TLB_index];
  Bit8u *fetchPtr = 0;

  if ((tlbEntry->lpf == lpf) && !(tlbEntry->accessBits & (0x4 | USER_PL))) {
    BX_CPU_THIS_PTR pAddrPage = tlbEntry->ppf;
    fetchPtr = (Bit8u*) tlbEntry->hostPageAddr;
  }
  else {
    bx_phy_address pAddr = translate_linear(laddr, CPL, BX_EXECUTE);
    BX_CPU_THIS_PTR pAddrPage = PPFOf(pAddr);
  }

  if (fetchPtr) {
    BX_CPU_THIS_PTR eipFetchPtr = fetchPtr;
  }
  else {
    BX_CPU_THIS_PTR eipFetchPtr = (const Bit8u*) getHostMemAddr(BX_CPU_THIS_PTR pAddrPage, BX_EXECUTE);

    // Sanity checks
    if (! BX_CPU_THIS_PTR eipFetchPtr) {
      bx_phy_address pAddr = BX_CPU_THIS_PTR pAddrPage + pageOffset;
      if (pAddr >= BX_MEM(0)->get_memory_len()) {
        BX_PANIC(("prefetch: running in bogus memory, pAddr=0x" FMT_PHY_ADDRX, pAddr));
      }
      else {
        BX_PANIC(("prefetch: getHostMemAddr vetoed direct read, pAddr=0x" FMT_PHY_ADDRX, pAddr));
      }
    }
  }
}
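
// deliver_SIPI() models the APIC startup IPI: a CPU parked in the
// wait-for-SIPI state becomes active with CS selector = vector*0x100 and
// IP = 0; otherwise the SIPI is ignored and only logged.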
void BX_CPU_C::deliver_SIPI(unsigned vector)
{
  if (BX_CPU_THIS_PTR activity_state == BX_ACTIVITY_STATE_WAIT_FOR_SIPI) {
    BX_CPU_THIS_PTR activity_state = BX_ACTIVITY_STATE_ACTIVE;
    RIP = 0;
    load_seg_reg(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS], vector*0x100);
    BX_CPU_THIS_PTR disable_INIT = 0; // enable INIT pin back
    BX_INFO(("CPU %d started up at %04X:%08X by APIC",
            BX_CPU_THIS_PTR bx_cpuid, vector*0x100, EIP));
  } else {
    BX_INFO(("CPU %d started up by APIC, but was not halted at the time", BX_CPU_THIS_PTR bx_cpuid));
  }
}
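
// The deliver_*() and set_INTR() helpers below do not service the event
// themselves; they only latch it (pending_INIT / pending_NMI / pending_SMI /
// INTR) and raise async_event so that handleAsyncEvent() picks it up on the
// next instruction boundary.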
void BX_CPU_C::deliver_INIT(void)
{
  if (! BX_CPU_THIS_PTR disable_INIT) {
    BX_CPU_THIS_PTR pending_INIT = 1;
    BX_CPU_THIS_PTR async_event = 1;
  }
}

void BX_CPU_C::deliver_NMI(void)
{
  BX_CPU_THIS_PTR pending_NMI = 1;
  BX_CPU_THIS_PTR async_event = 1;
}

void BX_CPU_C::deliver_SMI(void)
{
  BX_CPU_THIS_PTR pending_SMI = 1;
  BX_CPU_THIS_PTR async_event = 1;
}

void BX_CPU_C::set_INTR(bx_bool value)
{
  BX_CPU_THIS_PTR INTR = value;
  BX_CPU_THIS_PTR async_event = 1;
}
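
// dbg_instruction_epilog() runs after every completed instruction when the
// internal debugger or the GDB stub is compiled in.  It records the current
// CS:EIP and linear address in guard_found and returns 1 when a breakpoint,
// watchpoint, magic break, user interrupt request or gdbstub stop condition
// should make cpu_loop() return to the debugger.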
#if BX_DEBUGGER || BX_GDBSTUB
bx_bool BX_CPU_C::dbg_instruction_epilog(void)
{
#if BX_DEBUGGER
  Bit64u tt = bx_pc_system.time_ticks();
  bx_address debug_eip = RIP;
  Bit16u cs = BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value;

  BX_CPU_THIS_PTR guard_found.icount++;
  BX_CPU_THIS_PTR guard_found.cs  = cs;
  BX_CPU_THIS_PTR guard_found.eip = debug_eip;
  BX_CPU_THIS_PTR guard_found.laddr = BX_CPU_THIS_PTR get_laddr(BX_SEG_REG_CS, debug_eip);
  BX_CPU_THIS_PTR guard_found.code_32_64 = BX_CPU_THIS_PTR fetchModeMask;

  //
  // Take care of break point conditions generated during instruction execution
  //

  // Check if we hit read/write or time breakpoint
  if (BX_CPU_THIS_PTR break_point) {
    switch (BX_CPU_THIS_PTR break_point) {
    case BREAK_POINT_TIME:
      BX_INFO(("[" FMT_LL "d] Caught time breakpoint", tt));
      BX_CPU_THIS_PTR stop_reason = STOP_TIME_BREAK_POINT;
      return(1); // on a breakpoint
    case BREAK_POINT_READ:
      BX_INFO(("[" FMT_LL "d] Caught read watch point", tt));
      BX_CPU_THIS_PTR stop_reason = STOP_READ_WATCH_POINT;
      return(1); // on a breakpoint
    case BREAK_POINT_WRITE:
      BX_INFO(("[" FMT_LL "d] Caught write watch point", tt));
      BX_CPU_THIS_PTR stop_reason = STOP_WRITE_WATCH_POINT;
      return(1); // on a breakpoint
    default:
      BX_PANIC(("Weird break point condition"));
    }
  }

  if (BX_CPU_THIS_PTR magic_break) {
    BX_INFO(("[" FMT_LL "d] Stopped on MAGIC BREAKPOINT", bx_pc_system.time_ticks()));
    BX_CPU_THIS_PTR stop_reason = STOP_MAGIC_BREAK_POINT;
    return(1); // on a breakpoint
  }

  // convenient point to see if user requested debug break or typed Ctrl-C
  if (bx_guard.interrupt_requested) {
    return(1);
  }

  // support for 'show' command in debugger
  extern unsigned dbg_show_mask;
  if(dbg_show_mask) {
    int rv = bx_dbg_show_symbolic();
    if (rv) return(rv);
  }

  // Just committed an instruction, before fetching a new one
  // see if debugger is looking for iaddr breakpoint of any type
  if (bx_guard.guard_for & BX_DBG_GUARD_IADDR_ALL) {
#if (BX_DBG_MAX_VIR_BPOINTS > 0)
    if (bx_guard.guard_for & BX_DBG_GUARD_IADDR_VIR) {
      for (unsigned n=0; n<bx_guard.iaddr.num_virtual; n++) {
        if (bx_guard.iaddr.vir[n].enabled &&
           (bx_guard.iaddr.vir[n].cs  == cs) &&
           (bx_guard.iaddr.vir[n].eip == debug_eip))
        {
          BX_CPU_THIS_PTR guard_found.guard_found = BX_DBG_GUARD_IADDR_VIR;
          BX_CPU_THIS_PTR guard_found.iaddr_index = n;
          BX_CPU_THIS_PTR guard_found.time_tick = tt;
          return(1); // on a breakpoint
        }
      }
    }
#endif
#if (BX_DBG_MAX_LIN_BPOINTS > 0)
    if (bx_guard.guard_for & BX_DBG_GUARD_IADDR_LIN) {
      for (unsigned n=0; n<bx_guard.iaddr.num_linear; n++) {
        if (bx_guard.iaddr.lin[n].enabled &&
           (bx_guard.iaddr.lin[n].addr == BX_CPU_THIS_PTR guard_found.laddr))
        {
          BX_CPU_THIS_PTR guard_found.guard_found = BX_DBG_GUARD_IADDR_LIN;
          BX_CPU_THIS_PTR guard_found.iaddr_index = n;
          BX_CPU_THIS_PTR guard_found.time_tick = tt;
          return(1); // on a breakpoint
        }
      }
    }
#endif
#if (BX_DBG_MAX_PHY_BPOINTS > 0)
    if (bx_guard.guard_for & BX_DBG_GUARD_IADDR_PHY) {
      bx_phy_address phy;
      bx_bool valid = dbg_xlate_linear2phy(BX_CPU_THIS_PTR guard_found.laddr, &phy);
      if (valid) {
        for (unsigned n=0; n<bx_guard.iaddr.num_physical; n++) {
          if (bx_guard.iaddr.phy[n].enabled && (bx_guard.iaddr.phy[n].addr == phy))
          {
            BX_CPU_THIS_PTR guard_found.guard_found = BX_DBG_GUARD_IADDR_PHY;
            BX_CPU_THIS_PTR guard_found.iaddr_index = n;
            BX_CPU_THIS_PTR guard_found.time_tick = tt;
            return(1); // on a breakpoint
          }
        }
      }
    }
#endif
  }
#endif

#if BX_GDBSTUB
  if (bx_dbg.gdbstub_enabled) {
    unsigned reason = bx_gdbstub_check(EIP);
    if (reason != GDBSTUB_STOP_NO_REASON) return(1);
  }
#endif

  return(0);
}
#endif // BX_DEBUGGER || BX_GDBSTUB
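
// The debugger-only helpers below let the internal debugger inject an
// external interrupt or a DMA cycle outside of the normal cpu_loop() flow;
// each mirrors the corresponding path in cpu_loop()/handleAsyncEvent().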
#if BX_DEBUGGER

void BX_CPU_C::dbg_take_irq(void)
{
  // NOTE: similar code in ::cpu_loop()

  if (BX_CPU_INTR && BX_CPU_THIS_PTR get_IF()) {
    if (setjmp(BX_CPU_THIS_PTR jmp_buf_env) == 0) {
      // normal return from setjmp setup
      unsigned vector = DEV_pic_iac(); // may set INTR with next interrupt
      BX_CPU_THIS_PTR EXT = 1; // external event
      BX_CPU_THIS_PTR async_event = 1; // set in case INTR is triggered
      interrupt(vector, BX_EXTERNAL_INTERRUPT, 0, 0);
    }
  }
}

void BX_CPU_C::dbg_force_interrupt(unsigned vector)
{
  // Used to force simulator to take an interrupt, without
  // regard to IF

  if (setjmp(BX_CPU_THIS_PTR jmp_buf_env) == 0) {
    // normal return from setjmp setup
    BX_CPU_THIS_PTR EXT = 1; // external event
    BX_CPU_THIS_PTR async_event = 1; // probably don't need this
    interrupt(vector, BX_EXTERNAL_INTERRUPT, 0, 0);
  }
}

void BX_CPU_C::dbg_take_dma(void)
{
  // NOTE: similar code in ::cpu_loop()
  if (BX_HRQ) {
    BX_CPU_THIS_PTR async_event = 1; // set in case INTR is triggered
    DEV_dma_raise_hlda();
  }
}

#endif // #if BX_DEBUGGER