/////////////////////////////////////////////////////////////////////////
// $Id: cpu.cc,v 1.53 2002-09-28 00:54:04 kevinlawton Exp $
/////////////////////////////////////////////////////////////////////////
//
//  Copyright (C) 2001  MandrakeSoft S.A.
//
//    MandrakeSoft S.A.
//    43, rue d'Aboukir
//    75002 Paris - France
//    http://www.linux-mandrake.com/
//    http://www.mandrakesoft.com/
//
//  This library is free software; you can redistribute it and/or
//  modify it under the terms of the GNU Lesser General Public
//  License as published by the Free Software Foundation; either
//  version 2 of the License, or (at your option) any later version.
//
//  This library is distributed in the hope that it will be useful,
//  but WITHOUT ANY WARRANTY; without even the implied warranty of
//  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
//  Lesser General Public License for more details.
//
//  You should have received a copy of the GNU Lesser General Public
//  License along with this library; if not, write to the Free Software
//  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA

#define NEED_CPU_REG_SHORTCUTS 1
#include "bochs.h"

#define LOG_THIS BX_CPU_THIS_PTR

#if BX_USE_CPU_SMF
#define this (BX_CPU(0))
#endif

#if BX_EXTERNAL_DEBUGGER
#include "cpu/extdb.h"
#endif

#if BX_SIM_ID == 0   // only need to define once
// This array defines a look-up table for the even parity-ness
// of an 8bit quantity, for optimal assignment of the parity bit
// in the EFLAGS register
const Boolean bx_parity_lookup[256] = {
  1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
  0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
  0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
  1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
  0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
  1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
  1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
  0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
  0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
  1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
  1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
  0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
  1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
  0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
  0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
  1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1
  };
#endif
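
// Note (illustrative sketch only, not compiled into the emulator): each entry
// of bx_parity_lookup[] is simply the even-parity function of its index, i.e.
// 1 when the byte contains an even number of set bits.  A table like the one
// above could be generated with something along these lines:
#if 0
static void bx_build_parity_lookup(Boolean table[256])
{
  for (unsigned v = 0; v < 256; v++) {
    unsigned ones = 0;
    for (unsigned b = 0; b < 8; b++)
      ones += (v >> b) & 1;          // count the set bits in the byte
    table[v] = ((ones & 1) == 0);    // PF semantics: set when the count is even
    }
}
#endif
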
#if BX_SMP_PROCESSORS==1
// single processor simulation, so there's one of everything
BX_CPU_C    bx_cpu;
BX_MEM_C    bx_mem;
#else
// multiprocessor simulation, we need an array of cpus and memories
BX_CPU_C    *bx_cpu_array[BX_SMP_PROCESSORS];
BX_MEM_C    *bx_mem_array[BX_ADDRESS_SPACES];
#endif

// notes:
//
// check limit of CS?

#ifdef REGISTER_IADDR
extern void REGISTER_IADDR(bx_addr addr);
#endif


// The CHECK_MAX_INSTRUCTIONS macro allows cpu_loop to execute a few
// instructions and then return so that the other processors have a chance to
// run.  This is used only when simulating multiple processors.
//
// If maximum instructions have been executed, return.  A count less
// than zero means run forever.
#define CHECK_MAX_INSTRUCTIONS(count) \
  if (count >= 0) {                   \
    count--; if (count == 0) return;  \
    }
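
// For example (illustrative only): an SMP scheduler could call cpu_loop(5) on
// each simulated processor in turn, so a CPU returns to its caller after
// executing five instructions and the next one gets a turn, while cpu_loop(-1)
// never takes this early return and runs until a guard or exception stops it.
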
#if BX_SMP_PROCESSORS==1
#  define BX_TICK1_IF_SINGLE_PROCESSOR() BX_TICK1()
#else
#  define BX_TICK1_IF_SINGLE_PROCESSOR()
#endif


// Make code more tidy with a few macros.
#if BX_SUPPORT_X86_64==0
#define RIP EIP
#define RSP ESP
#endif

#if BX_DYNAMIC_TRANSLATION == 0
  void
BX_CPU_C::cpu_loop(Bit32s max_instr_count)
{
  unsigned ret;
  bxInstruction_c *i;
  bxInstruction_c iStorage BX_CPP_AlignN(32);
  i = &iStorage;

#if BX_USE_CPU_SMF
#define DeclareExecute()      void (*execute)(bxInstruction_c *)
#define DeclareResolveModRM() void (*resolveModRM)(bxInstruction_c *)
#else
#define DeclareExecute()      void (BX_CPU_C::*execute)(bxInstruction_c *)
#define DeclareResolveModRM() void (BX_CPU_C::*resolveModRM)(bxInstruction_c *)
#endif

  DeclareExecute();

#if BX_DEBUGGER
  BX_CPU_THIS_PTR break_point = 0;
#ifdef MAGIC_BREAKPOINT
  BX_CPU_THIS_PTR magic_break = 0;
#endif
  BX_CPU_THIS_PTR stop_reason = STOP_NO_REASON;
#endif

  (void) setjmp( BX_CPU_THIS_PTR jmp_buf_env );

  // We get here either by a normal function call, or by a longjmp
  // back from an exception() call.  In either case, commit the
  // new EIP/ESP, and set up other environmental fields.  This code
  // mirrors similar code below, after the interrupt() call.
  BX_CPU_THIS_PTR prev_eip = RIP; // commit new EIP
  BX_CPU_THIS_PTR prev_esp = RSP; // commit new ESP
  BX_CPU_THIS_PTR EXT = 0;
  BX_CPU_THIS_PTR errorno = 0;

main_cpu_loop:

  // First check on events which occurred for previous instructions
  // (traps) and ones which are asynchronous to the CPU
  // (hardware interrupts).
  if (BX_CPU_THIS_PTR async_event)
    goto handle_async_event;

async_events_processed:

#if BX_DEBUGGER
  {
  Bit32u debug_eip = BX_CPU_THIS_PTR prev_eip;
  if ( dbg_is_begin_instr_bpoint(
         BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value,
         debug_eip,
         BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.base + debug_eip,
         BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.d_b) ) {
    return;
    }
  }
#endif  // #if BX_DEBUGGER

#if BX_EXTERNAL_DEBUGGER
  if (regs.debug_state != debug_run) {
    bx_external_debugger(this);
  }
#endif

  {
  bx_address eipBiased;
  Bit8u *fetchPtr;

  eipBiased = RIP + BX_CPU_THIS_PTR eipPageBias;

  if ( eipBiased >= BX_CPU_THIS_PTR eipPageWindowSize ) {
    prefetch();
    eipBiased = RIP + BX_CPU_THIS_PTR eipPageBias;
    }
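
  // (Explanatory note) eipPageBias is set up by prefetch() so that
  // RIP + eipPageBias gives RIP's byte offset within the current prefetch
  // window (the page behind eipFetchPtr).  When that offset runs off the end
  // of the window, prefetch() above re-establishes the window and the bias.
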
#if BX_SupportICache
  unsigned iCacheHash;
  Bit32u pAddr, pageWriteStamp, fetchModeMask;

  pAddr = BX_CPU_THIS_PTR pAddrA20Page + eipBiased;
  iCacheHash = BX_CPU_THIS_PTR iCache.hash( pAddr );
  i = & BX_CPU_THIS_PTR iCache.entry[iCacheHash].i;

  pageWriteStamp = BX_CPU_THIS_PTR iCache.pageWriteStampTable[pAddr>>12];
  fetchModeMask  = BX_CPU_THIS_PTR iCache.fetchModeMask;

  if ( (BX_CPU_THIS_PTR iCache.entry[iCacheHash].pAddr == pAddr) &&
       (BX_CPU_THIS_PTR iCache.entry[iCacheHash].writeStamp == pageWriteStamp) &&
       ((pageWriteStamp & fetchModeMask) == fetchModeMask) ) {

    // iCache hit.  Instruction is already decoded and stored in
    // the instruction cache.
    DeclareResolveModRM();
    resolveModRM = i->ResolveModrm; // Get as soon as possible for speculation.
    execute = i->execute; // fetch as soon as possible for speculation.
    if (resolveModRM) {
      BX_CPU_CALL_METHOD(resolveModRM, (i));
      }
    }
  else
#endif
    {
    // iCache miss.  No validated instruction with matching fetch parameters
    // is in the iCache.  Or we're not compiling iCache support in, in which
    // case we always have an iCache miss.  :^)
    bx_address remainingInPage;
    unsigned maxFetch;

    remainingInPage = (BX_CPU_THIS_PTR eipPageWindowSize - eipBiased);
    maxFetch = 15;
    if (remainingInPage < 15)
      maxFetch = remainingInPage;
    fetchPtr = BX_CPU_THIS_PTR eipFetchPtr + eipBiased;

#if BX_SupportICache
    // In the case where the page is marked ICacheWriteStampInvalid, all
    // counter bits will be high, being equivalent to ICacheWriteStampMax.
    // In the case where the page is marked as possibly having associated
    // iCache entries, we need to leave the counter as-is, unless we're
    // willing to dump all iCache entries which can hash to this page.
    // Therefore, in either case, we can keep the counter as-is and
    // replace the fetch mode bits.
    pageWriteStamp &= 0x1fffffff;    // Clear out old fetch mode bits.
    pageWriteStamp |= fetchModeMask; // Add in new ones.
    BX_CPU_THIS_PTR iCache.pageWriteStampTable[pAddr>>12] = pageWriteStamp;
    BX_CPU_THIS_PTR iCache.entry[iCacheHash].pAddr = pAddr;
    BX_CPU_THIS_PTR iCache.entry[iCacheHash].writeStamp = pageWriteStamp;
#endif

#if BX_SUPPORT_X86_64
    if (BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64) {
      ret = fetchDecode64(fetchPtr, i, maxFetch);
      }
    else
#endif
      {
      ret = fetchDecode(fetchPtr, i, maxFetch);
      }

    DeclareResolveModRM();
    resolveModRM = i->ResolveModrm; // Get function pointers as early as possible.
    if (ret==0) {
#if BX_SupportICache
      // Invalidate entry, since fetch-decode failed with partial updates
      // to the i-> structure.
      BX_CPU_THIS_PTR iCache.entry[iCacheHash].writeStamp =
          ICacheWriteStampInvalid;
      i = &iStorage;
#endif
      boundaryFetch(i);
      resolveModRM = i->ResolveModrm; // Get function pointers as early as possible.
      }

    // An instruction will have been fetched using either the normal case,
    // or the boundary fetch (across pages), by this point.
    BX_INSTR_FETCH_DECODE_COMPLETED(CPU_ID, i);

    execute = i->execute; // fetch as soon as possible for speculation.
    if (resolveModRM) {
      BX_CPU_CALL_METHOD(resolveModRM, (i));
      }
    }
  }

  // An instruction was either fetched, or found in the iCache.
  BX_INSTR_OPCODE(CPU_ID, fetchPtr, i->ilen(),
                  BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.d_b);

#if BX_DEBUGGER
  if (BX_CPU_THIS_PTR trace) {
    // print the instruction that is about to be executed.
#if (BX_SMP_PROCESSORS==1)
    bx_dbg_disassemble_current (0, 1);  // only one cpu, print time stamp
#else
    bx_dbg_disassemble_current (local_apic.get_id (), 1); // this cpu only
#endif
    }
#endif

  if ( !(i->repUsedL() && i->repeatableL()) ) {
    // non repeating instruction
    RIP += i->ilen();
    BX_CPU_CALL_METHOD(execute, (i));

    BX_CPU_THIS_PTR prev_eip = RIP; // commit new EIP
    BX_CPU_THIS_PTR prev_esp = RSP; // commit new ESP
#ifdef REGISTER_IADDR
    REGISTER_IADDR(RIP + BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.base);
#endif

    BX_TICK1_IF_SINGLE_PROCESSOR();
    }

  else {

repeat_loop:
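    // (Explanatory note) Two flavors of REP handling follow: string
    // compare/scan instructions (repeatableZFL) also terminate on ZF --
    // repUsedValue()==3 behaves as REPE/REPZ (stop when ZF is clear) and
    // repUsedValue()==2 as REPNE/REPNZ (stop when ZF is set) -- while all
    // other repeatable instructions loop only until RCX/ECX/CX reaches 0.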
    if (i->repeatableZFL()) {
#if BX_SUPPORT_X86_64
      if (i->as64L()) {
        if (RCX != 0) {
          BX_CPU_CALL_METHOD(execute, (i));
          RCX -= 1;
          }
        if ((i->repUsedValue()==3) && (get_ZF()==0)) goto repeat_done;
        if ((i->repUsedValue()==2) && (get_ZF()!=0)) goto repeat_done;
        if (RCX == 0) goto repeat_done;
        goto repeat_not_done;
        }
      else
#endif
      if (i->as32L()) {
        if (ECX != 0) {
          BX_CPU_CALL_METHOD(execute, (i));
          ECX -= 1;
          }
        if ((i->repUsedValue()==3) && (get_ZF()==0)) goto repeat_done;
        if ((i->repUsedValue()==2) && (get_ZF()!=0)) goto repeat_done;
        if (ECX == 0) goto repeat_done;
        goto repeat_not_done;
        }
      else {
        if (CX != 0) {
          BX_CPU_CALL_METHOD(execute, (i));
          CX -= 1;
          }
        if ((i->repUsedValue()==3) && (get_ZF()==0)) goto repeat_done;
        if ((i->repUsedValue()==2) && (get_ZF()!=0)) goto repeat_done;
        if (CX == 0) goto repeat_done;
        goto repeat_not_done;
        }
      }
    else { // normal repeat, no concern for ZF
#if BX_SUPPORT_X86_64
      if (i->as64L()) {
        if (RCX != 0) {
          BX_CPU_CALL_METHOD(execute, (i));
          RCX -= 1;
          }
        if (RCX == 0) goto repeat_done;
        goto repeat_not_done;
        }
      else
#endif
      if (i->as32L()) {
        if (ECX != 0) {
          BX_CPU_CALL_METHOD(execute, (i));
          ECX -= 1;
          }
        if (ECX == 0) goto repeat_done;
        goto repeat_not_done;
        }
      else { // 16bit addrsize
        if (CX != 0) {
          BX_CPU_CALL_METHOD(execute, (i));
          CX -= 1;
          }
        if (CX == 0) goto repeat_done;
        goto repeat_not_done;
        }
      }
    // shouldn't get here from above

repeat_not_done:
#ifdef REGISTER_IADDR
    REGISTER_IADDR(RIP + BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.base);
#endif

    BX_INSTR_REPEAT_ITERATION(CPU_ID);
    BX_TICK1_IF_SINGLE_PROCESSOR();

#if BX_DEBUGGER == 0
    if (BX_CPU_THIS_PTR async_event) {
      invalidate_prefetch_q();
      goto debugger_check;
      }
    goto repeat_loop;
#else  /* if BX_DEBUGGER == 1 */
    invalidate_prefetch_q();
    goto debugger_check;
#endif


repeat_done:
    RIP += i->ilen();

    BX_CPU_THIS_PTR prev_eip = RIP; // commit new EIP
    BX_CPU_THIS_PTR prev_esp = RSP; // commit new ESP
#ifdef REGISTER_IADDR
    REGISTER_IADDR(RIP + BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.base);
#endif

    BX_INSTR_REPEAT_ITERATION(CPU_ID);
    BX_TICK1_IF_SINGLE_PROCESSOR();
    }

debugger_check:

  // inform instrumentation about new instruction
  BX_INSTR_NEW_INSTRUCTION(CPU_ID);

#if (BX_SMP_PROCESSORS>1 && BX_DEBUGGER==0)
  // The CHECK_MAX_INSTRUCTIONS macro allows cpu_loop to execute a few
  // instructions and then return so that the other processors have a chance
  // to run.  This is used only when simulating multiple processors.  If only
  // one processor, don't waste any cycles on it!  Also, it is not needed
  // with the debugger because its guard mechanism provides the same
  // functionality.
  CHECK_MAX_INSTRUCTIONS(max_instr_count);
#endif

#if BX_DEBUGGER

  // BW vm mode switch support is in dbg_is_begin_instr_bpoint
  // note instr generating exceptions never reach this point.

  // (mch) Read/write, time break point support
  if (BX_CPU_THIS_PTR break_point) {
    switch (BX_CPU_THIS_PTR break_point) {
      case BREAK_POINT_TIME:
        BX_INFO(("[%lld] Caught time breakpoint", bx_pc_system.time_ticks()));
        BX_CPU_THIS_PTR stop_reason = STOP_TIME_BREAK_POINT;
        return;
      case BREAK_POINT_READ:
        BX_INFO(("[%lld] Caught read watch point", bx_pc_system.time_ticks()));
        BX_CPU_THIS_PTR stop_reason = STOP_READ_WATCH_POINT;
        return;
      case BREAK_POINT_WRITE:
        BX_INFO(("[%lld] Caught write watch point", bx_pc_system.time_ticks()));
        BX_CPU_THIS_PTR stop_reason = STOP_WRITE_WATCH_POINT;
        return;
      default:
        BX_PANIC(("Weird break point condition"));
      }
    }

#ifdef MAGIC_BREAKPOINT
  // (mch) Magic break point support
  if (BX_CPU_THIS_PTR magic_break) {
    if (bx_dbg.magic_break_enabled) {
      BX_DEBUG(("Stopped on MAGIC BREAKPOINT"));
      BX_CPU_THIS_PTR stop_reason = STOP_MAGIC_BREAK_POINT;
      return;
      } else {
      BX_CPU_THIS_PTR magic_break = 0;
      BX_CPU_THIS_PTR stop_reason = STOP_NO_REASON;
      BX_DEBUG(("Ignoring MAGIC BREAKPOINT"));
      }
    }
#endif

  {
  // check for icount or control-C.  If found, set guard reg and return.
  Bit32u debug_eip = BX_CPU_THIS_PTR prev_eip;
  if ( dbg_is_end_instr_bpoint(
         BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value,
         debug_eip,
         BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.base + debug_eip,
         BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.d_b) ) {
    return;
    }
  }

#endif  // #if BX_DEBUGGER

  goto main_cpu_loop;

  //
  // This area is where we process special conditions and events.
  //

handle_async_event:

  if (BX_CPU_THIS_PTR debug_trap & 0x80000000) {
    // I made up the bitmask above to mean HALT state.
#if BX_SMP_PROCESSORS==1
    BX_CPU_THIS_PTR debug_trap = 0;   // clear traps for after resume
    BX_CPU_THIS_PTR inhibit_mask = 0; // clear inhibits for after resume
    // for one processor, pass the time as quickly as possible until
    // an interrupt wakes up the CPU.
#if BX_DEBUGGER
    while (bx_guard.interrupt_requested != 1)
#else
    while (1)
#endif
      {
      if (BX_CPU_THIS_PTR INTR && BX_CPU_THIS_PTR get_IF ()) {
        break;
        }
      if (BX_CPU_THIS_PTR async_event == 0) {
        BX_INFO(("decode: reset detected in halt state"));
        break;
        }
      BX_TICK1();
      }
#else   /* BX_SMP_PROCESSORS != 1 */
    // for multiprocessor simulation, even if this CPU is halted we still
    // must give the others a chance to simulate.  If an interrupt has
    // arrived, then clear the HALT condition; otherwise just return from
    // the CPU loop with stop_reason STOP_CPU_HALTED.
    if (BX_CPU_THIS_PTR INTR && BX_CPU_THIS_PTR get_IF ()) {
      // interrupt ends the HALT condition
      BX_CPU_THIS_PTR debug_trap = 0;   // clear traps for after resume
      BX_CPU_THIS_PTR inhibit_mask = 0; // clear inhibits for after resume
      //bx_printf ("halt condition has been cleared in %s", name);
    } else {
      // HALT condition remains, return so other CPUs have a chance
#if BX_DEBUGGER
      BX_CPU_THIS_PTR stop_reason = STOP_CPU_HALTED;
#endif
      return;
    }
#endif
  } else if (BX_CPU_THIS_PTR kill_bochs_request) {
    // setting kill_bochs_request causes the cpu loop to return ASAP.
    return;
  }

  // Priority 1: Hardware Reset and Machine Checks
  //   RESET
  //   Machine Check
  // (bochs doesn't support these)

  // Priority 2: Trap on Task Switch
  //   T flag in TSS is set
  if (BX_CPU_THIS_PTR debug_trap & 0x00008000) {
    BX_CPU_THIS_PTR dr6 |= BX_CPU_THIS_PTR debug_trap;
    exception(BX_DB_EXCEPTION, 0, 0); // no error, not interrupt
    }

  // Priority 3: External Hardware Interventions
  //   FLUSH
  //   STOPCLK
  //   SMI
  //   INIT
  // (bochs doesn't support these)

  // Priority 4: Traps on Previous Instruction
  //   Breakpoints
  //   Debug Trap Exceptions (TF flag set or data/IO breakpoint)
  if ( BX_CPU_THIS_PTR debug_trap &&
       !(BX_CPU_THIS_PTR inhibit_mask & BX_INHIBIT_DEBUG) ) {
    // A trap may be inhibited on this boundary due to an instruction
    // which loaded SS.  If so we clear the inhibit_mask below
    // and don't execute this code until the next boundary.
    // Commit debug events to DR6
    BX_CPU_THIS_PTR dr6 |= BX_CPU_THIS_PTR debug_trap;
    exception(BX_DB_EXCEPTION, 0, 0); // no error, not interrupt
    }

  // Priority 5: External Interrupts
  //   NMI Interrupts
  //   Maskable Hardware Interrupts
  if (BX_CPU_THIS_PTR inhibit_mask & BX_INHIBIT_INTERRUPTS) {
    // Processing external interrupts is inhibited on this
    // boundary because of certain instructions like STI.
    // inhibit_mask is cleared below, in which case we will have
    // an opportunity to check interrupts on the next instruction
    // boundary.
    }
  else if (BX_CPU_THIS_PTR INTR && BX_CPU_THIS_PTR get_IF () &&
           BX_DBG_ASYNC_INTR) {
    Bit8u vector;

    // NOTE: similar code in ::take_irq()
#if BX_SUPPORT_APIC
    if (BX_CPU_THIS_PTR int_from_local_apic)
      vector = BX_CPU_THIS_PTR local_apic.acknowledge_int ();
    else
      vector = BX_IAC(); // may set INTR with next interrupt
#else
    // if no local APIC, always acknowledge the PIC.
    vector = BX_IAC(); // may set INTR with next interrupt
#endif
    //BX_DEBUG(("decode: interrupt %u",
    //          (unsigned) vector));
    BX_CPU_THIS_PTR errorno = 0;
    BX_CPU_THIS_PTR EXT = 1; /* external event */
    interrupt(vector, 0, 0, 0);
    BX_INSTR_HWINTERRUPT(CPU_ID, vector,
        BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value, EIP);
    // Set up environment, as would be when this main cpu loop gets
    // invoked.  At the end of normal instructions, we always commit
    // the new EIP/ESP values.  But here, we call interrupt() much like
    // it was a software interrupt instruction, and need to effect the
    // commit here.  This code mirrors similar code above.
    BX_CPU_THIS_PTR prev_eip = RIP; // commit new RIP
    BX_CPU_THIS_PTR prev_esp = RSP; // commit new RSP
    BX_CPU_THIS_PTR EXT = 0;
    BX_CPU_THIS_PTR errorno = 0;
    }
  else if (BX_HRQ && BX_DBG_ASYNC_DMA) {
    // NOTE: similar code in ::take_dma()
    // assert Hold Acknowledge (HLDA) and go into a bus hold state
    BX_RAISE_HLDA();
    }

  // Priority 6: Faults from fetching next instruction
  //   Code breakpoint fault
  //   Code segment limit violation (priority 7 on 486/Pentium)
  //   Code page fault (priority 7 on 486/Pentium)
  // (handled in main decode loop)

  // Priority 7: Faults from decoding next instruction
  //   Instruction length > 15 bytes
  //   Illegal opcode
  //   Coprocessor not available
  // (handled in main decode loop etc)

  // Priority 8: Faults on executing an instruction
  //   Floating point execution
  //   Overflow
  //   Bound error
  //   Invalid TSS
  //   Segment not present
  //   Stack fault
  //   General protection
  //   Data page fault
  //   Alignment check
  // (handled by rest of the code)


  if (BX_CPU_THIS_PTR get_TF ()) {
    // TF is set before execution of next instruction.  Schedule
    // a debug trap (#DB) after execution.  After completion of
    // next instruction, the code above will invoke the trap.
    BX_CPU_THIS_PTR debug_trap |= 0x00004000; // BS flag in DR6
    }

  // Now we can handle things which are synchronous to instruction
  // execution.
  if (BX_CPU_THIS_PTR get_RF ()) {
    BX_CPU_THIS_PTR clear_RF ();
    }
#if BX_X86_DEBUGGER
  else {
    // only bother comparing if any breakpoints enabled
    if ( BX_CPU_THIS_PTR dr7 & 0x000000ff ) {
      Bit32u iaddr =
        BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.base +
        BX_CPU_THIS_PTR prev_eip;
      Bit32u dr6_bits;
      if ( (dr6_bits = hwdebug_compare(iaddr, 1, BX_HWDebugInstruction,
                                       BX_HWDebugInstruction)) ) {
        // Add to the list of debug events thus far.
        BX_CPU_THIS_PTR debug_trap |= dr6_bits;
        BX_CPU_THIS_PTR async_event = 1;
        // If debug events are not inhibited on this boundary,
        // fire off a debug fault.  Otherwise handle it on the next
        // boundary. (becomes a trap)
        if ( !(BX_CPU_THIS_PTR inhibit_mask & BX_INHIBIT_DEBUG) ) {
          // Commit debug events to DR6
          BX_CPU_THIS_PTR dr6 = BX_CPU_THIS_PTR debug_trap;
          exception(BX_DB_EXCEPTION, 0, 0); // no error, not interrupt
          }
        }
      }
    }
#endif

  // We have ignored processing of external interrupts and
  // debug events on this boundary.  Reset the mask so they
  // will be processed on the next boundary.
  BX_CPU_THIS_PTR inhibit_mask = 0;

  if ( !(BX_CPU_THIS_PTR INTR ||
         BX_CPU_THIS_PTR debug_trap ||
         BX_HRQ ||
         BX_CPU_THIS_PTR get_TF ()) )
    BX_CPU_THIS_PTR async_event = 0;

  goto async_events_processed;
}
#endif  // #if BX_DYNAMIC_TRANSLATION == 0


// boundaries of consideration:
//
//  * physical memory boundary: 1024k (1Megabyte) (increments of...)
//  * A20 boundary:             1024k (1Megabyte)
//  * page boundary:            4k
//  * ROM boundary:             2k (don't care since we are only reading)
//  * segment boundary:         any

  void
BX_CPU_C::prefetch(void)
{
  // cs:eIP
  // prefetch QSIZE byte quantity aligned on corresponding boundary
  bx_address laddr;
  Bit32u pAddr;
  bx_address temp_rip;
  Bit32u temp_limit;
  bx_address laddrPageOffset0, eipPageOffset0;

  temp_rip   = RIP;
  temp_limit = BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled;

  laddr = BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.base +
          temp_rip;

  if (((Bit32u)temp_rip) > temp_limit) {
    BX_PANIC(("prefetch: RIP > CS.limit"));
    }

#if BX_SUPPORT_PAGING
  if (BX_CPU_THIS_PTR cr0.pg) {
    // aligned block guaranteed to be all in one page, same A20 address
    pAddr = itranslate_linear(laddr, CPL==3);
    pAddr = A20ADDR(pAddr);
    }
  else
#endif // BX_SUPPORT_PAGING
    {
    pAddr = A20ADDR(laddr);
    }

  // check if segment boundary comes into play
  //if ((temp_limit - (Bit32u)temp_rip) < 4096) {
  //  }

  // Linear address at the beginning of the page.
  laddrPageOffset0 = laddr & 0xfffff000;
  // Calculate RIP at the beginning of the page.
  eipPageOffset0 = RIP - (laddr - laddrPageOffset0);
  BX_CPU_THIS_PTR eipPageBias = - eipPageOffset0;
  BX_CPU_THIS_PTR eipPageWindowSize = 4096; // FIXME:
  BX_CPU_THIS_PTR pAddrA20Page = pAddr & 0xfffff000;
  BX_CPU_THIS_PTR eipFetchPtr =
      BX_CPU_THIS_PTR mem->getHostMemAddr(this, BX_CPU_THIS_PTR pAddrA20Page,
                                          BX_READ);

  // Sanity checks
  if ( !BX_CPU_THIS_PTR eipFetchPtr ) {
    if ( pAddr >= BX_CPU_THIS_PTR mem->len ) {
      BX_PANIC(("prefetch: running in bogus memory"));
      }
    else {
      BX_PANIC(("prefetch: getHostMemAddr vetoed direct read, pAddr=0x%x.",
                pAddr));
      }
    }
}
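
// (Worked example, for illustration only.)  With CS.base = 0x10000 and
// EIP = 0x2345, prefetch() computes laddr = 0x12345, laddrPageOffset0 =
// 0x12000 and eipPageOffset0 = 0x2000, so eipPageBias becomes -0x2000.
// RIP + eipPageBias then yields 0x345: the instruction's byte offset within
// the 4K page behind eipFetchPtr, which is what cpu_loop() uses as eipBiased.
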
  void
BX_CPU_C::boundaryFetch(bxInstruction_c *i)
{
  unsigned j;
  Bit8u fetchBuffer[16]; // Really only need 15
  bx_address eipBiased, remainingInPage;
  Bit8u *fetchPtr;
  unsigned ret;

  eipBiased = RIP + BX_CPU_THIS_PTR eipPageBias;
  remainingInPage = (BX_CPU_THIS_PTR eipPageWindowSize - eipBiased);
  if (remainingInPage > 15) {
    BX_PANIC(("fetch_decode: remaining > max ilen"));
    }
  fetchPtr = BX_CPU_THIS_PTR eipFetchPtr + eipBiased;

  // Read all leftover bytes in current page up to boundary.
  for (j=0; j<remainingInPage; j++) {
    fetchBuffer[j] = *fetchPtr++;
    }

  // The 2nd chunk of the instruction is on the next page.
  // Set RIP to the 0th byte of the 2nd page, and force a
  // prefetch so direct access of that physical page is possible, and
  // all the associated info is updated.
  RIP += remainingInPage;
  prefetch();
  if (BX_CPU_THIS_PTR eipPageWindowSize < 15) {
    BX_PANIC(("fetch_decode: small window size after prefetch"));
    }

  // We can fetch straight from the 0th byte, which is eipFetchPtr;
  fetchPtr = BX_CPU_THIS_PTR eipFetchPtr;

  // read leftover bytes in next page
  for (; j<15; j++) {
    fetchBuffer[j] = *fetchPtr++;
    }

#if BX_SUPPORT_X86_64
  if (BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64) {
    ret = fetchDecode64(fetchBuffer, i, 15);
    }
  else
#endif
    {
    ret = fetchDecode(fetchBuffer, i, 15);
    }

  // Restore EIP since we fudged it to start at the 2nd page boundary.
  RIP = BX_CPU_THIS_PTR prev_eip;
  if (ret==0)
    BX_PANIC(("fetchDecode: cross boundary: ret==0"));

  // Since we cross an instruction boundary, note that we need a prefetch()
  // again on the next instruction.  Perhaps we can optimize this to
  // eliminate the extra prefetch() since we do it above, but have to
  // think about repeated instructions, etc.
  BX_CPU_THIS_PTR eipPageWindowSize = 0; // Fixme

  BX_INSTR_OPCODE(CPU_ID, fetchBuffer, i->ilen(),
                  BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.d_b);
}

#if 0
// Now a no-op.

// If control has transferred locally, it is possible the prefetch Q is
// still valid.  This would happen for repeat instructions, and small
// branches.
  void
BX_CPU_C::revalidate_prefetch_q(void)
{
#warning "::revalidate_prefetch_q() is ifdef'd out."
  bx_address eipBiased;

  eipBiased = RIP + BX_CPU_THIS_PTR eipPageBias;
  if ( eipBiased < BX_CPU_THIS_PTR eipPageWindowSize ) {
    // Good, EIP still within prefetch window.
    }
  else {
    // EIP has branched outside the prefetch window.  Mark the
    // prefetch info as invalid, and requiring update.
    BX_CPU_THIS_PTR eipPageWindowSize = 0;
    }
}
#endif

#if BX_SUPPORT_X86_64
  void
BX_CPU_C::ask (int level, const char *prefix, const char *fmt, va_list ap)
{
#if BX_EXTERNAL_DEBUGGER
  char buf1[1024];
  vsprintf (buf1, fmt, ap);
  printf ("%s %s\n", prefix, buf1);
  trap_debugger(1);
#else
  this->logfunctions::ask(level,prefix,fmt,ap);
#endif
}

#if BX_EXTERNAL_DEBUGGER

  void
BX_CPU_C::trap_debugger (Boolean callnow)
{
  regs.debug_state = debug_step;
  if (callnow) {
    bx_external_debugger(this);
  }
}

#endif

#endif  // #if BX_SUPPORT_X86_64

#if BX_DEBUGGER
extern unsigned int dbg_show_mask;

  Boolean
BX_CPU_C::dbg_is_begin_instr_bpoint(Bit32u cs, Bit32u eip, Bit32u laddr,
                                    Bit32u is_32)
{
  //fprintf (stderr, "begin_instr_bp: checking cs:eip %04x:%08x\n", cs, eip);
  BX_CPU_THIS_PTR guard_found.cs    = cs;
  BX_CPU_THIS_PTR guard_found.eip   = eip;
  BX_CPU_THIS_PTR guard_found.laddr = laddr;
  BX_CPU_THIS_PTR guard_found.is_32bit_code = is_32;

  // BW mode switch breakpoint
  // instructions which generate exceptions never reach the end of the
  // loop due to a long jump.  That's why we check at start of instr.
  // Downside is that we show the instruction about to be executed
  // (not the one generating the mode switch).
  if (BX_CPU_THIS_PTR mode_break &&
      (BX_CPU_THIS_PTR debug_vm != BX_CPU_THIS_PTR getB_VM ())) {
    BX_INFO(("Caught vm mode switch breakpoint"));
    BX_CPU_THIS_PTR debug_vm = BX_CPU_THIS_PTR getB_VM ();
    BX_CPU_THIS_PTR stop_reason = STOP_MODE_BREAK_POINT;
    return 1;
    }

  if( (BX_CPU_THIS_PTR show_flag) & (dbg_show_mask)) {
    int rv;
    if((rv = bx_dbg_symbolic_output()))
      return rv;
    }

  // see if debugger is looking for iaddr breakpoint of any type
  if (bx_guard.guard_for & BX_DBG_GUARD_IADDR_ALL) {
#if BX_DBG_SUPPORT_VIR_BPOINT
    if (bx_guard.guard_for & BX_DBG_GUARD_IADDR_VIR) {
      if (BX_CPU_THIS_PTR guard_found.icount!=0) {
        for (unsigned i=0; i<bx_guard.iaddr.num_virtual; i++) {
          if ( (bx_guard.iaddr.vir[i].cs  == cs) &&
               (bx_guard.iaddr.vir[i].eip == eip) ) {
            BX_CPU_THIS_PTR guard_found.guard_found = BX_DBG_GUARD_IADDR_VIR;
            BX_CPU_THIS_PTR guard_found.iaddr_index = i;
            return(1); // on a breakpoint
            }
          }
        }
      }
#endif
#if BX_DBG_SUPPORT_LIN_BPOINT
    if (bx_guard.guard_for & BX_DBG_GUARD_IADDR_LIN) {
      if (BX_CPU_THIS_PTR guard_found.icount!=0) {
        for (unsigned i=0; i<bx_guard.iaddr.num_linear; i++) {
          if ( bx_guard.iaddr.lin[i].addr == BX_CPU_THIS_PTR guard_found.laddr ) {
            BX_CPU_THIS_PTR guard_found.guard_found = BX_DBG_GUARD_IADDR_LIN;
            BX_CPU_THIS_PTR guard_found.iaddr_index = i;
            return(1); // on a breakpoint
            }
          }
        }
      }
#endif
#if BX_DBG_SUPPORT_PHY_BPOINT
    if (bx_guard.guard_for & BX_DBG_GUARD_IADDR_PHY) {
      Bit32u phy;
      Boolean valid;
      dbg_xlate_linear2phy(BX_CPU_THIS_PTR guard_found.laddr,
                           &phy, &valid);
      // The "guard_found.icount!=0" condition allows you to step or
      // continue beyond a breakpoint.  Bryce tried removing it once,
      // and once you get to a breakpoint you are stuck there forever.
      // Not pretty.
      if (valid && (BX_CPU_THIS_PTR guard_found.icount!=0)) {
        for (unsigned i=0; i<bx_guard.iaddr.num_physical; i++) {
          if ( bx_guard.iaddr.phy[i].addr == phy ) {
            BX_CPU_THIS_PTR guard_found.guard_found = BX_DBG_GUARD_IADDR_PHY;
            BX_CPU_THIS_PTR guard_found.iaddr_index = i;
            return(1); // on a breakpoint
            }
          }
        }
      }
#endif
    }
  return(0); // not on a breakpoint
}

  Boolean
BX_CPU_C::dbg_is_end_instr_bpoint(Bit32u cs, Bit32u eip, Bit32u laddr,
                                  Bit32u is_32)
{
  //fprintf (stderr, "end_instr_bp: checking for icount or ^C\n");
  BX_CPU_THIS_PTR guard_found.icount++;

  // convenient point to see if user typed Ctrl-C
  if (bx_guard.interrupt_requested &&
      (bx_guard.guard_for & BX_DBG_GUARD_CTRL_C)) {
    BX_CPU_THIS_PTR guard_found.guard_found = BX_DBG_GUARD_CTRL_C;
    return(1);
    }

  // see if debugger requesting icount guard
  if (bx_guard.guard_for & BX_DBG_GUARD_ICOUNT) {
    if (BX_CPU_THIS_PTR guard_found.icount >= bx_guard.icount) {
      BX_CPU_THIS_PTR guard_found.cs    = cs;
      BX_CPU_THIS_PTR guard_found.eip   = eip;
      BX_CPU_THIS_PTR guard_found.laddr = laddr;
      BX_CPU_THIS_PTR guard_found.is_32bit_code = is_32;
      BX_CPU_THIS_PTR guard_found.guard_found = BX_DBG_GUARD_ICOUNT;
      return(1);
      }
    }

#if (BX_NUM_SIMULATORS >= 2)
  // if async events are pending, acknowledge them
  if (bx_guard.async_changes_pending.which) {
    if (bx_guard.async_changes_pending.which & BX_DBG_ASYNC_PENDING_A20)
      bx_dbg_async_pin_ack(BX_DBG_ASYNC_PENDING_A20,
                           bx_guard.async_changes_pending.a20);
    if (bx_guard.async_changes_pending.which) {
      BX_PANIC(("decode: async pending unrecognized."));
      }
    }
#endif
  return(0); // no breakpoint
}

  void
BX_CPU_C::dbg_take_irq(void)
{
  unsigned vector;

  // NOTE: similar code in ::cpu_loop()

  if ( BX_CPU_THIS_PTR INTR && BX_CPU_THIS_PTR get_IF () ) {
    if ( setjmp(BX_CPU_THIS_PTR jmp_buf_env) == 0 ) {
      // normal return from setjmp setup
      vector = BX_IAC(); // may set INTR with next interrupt
      BX_CPU_THIS_PTR errorno = 0;
      BX_CPU_THIS_PTR EXT = 1; // external event
      BX_CPU_THIS_PTR async_event = 1; // set in case INTR is triggered
      interrupt(vector, 0, 0, 0);
      }
    }
}

  void
BX_CPU_C::dbg_force_interrupt(unsigned vector)
{
  // Used to force slave simulator to take an interrupt, without
  // regard to IF

  if ( setjmp(BX_CPU_THIS_PTR jmp_buf_env) == 0 ) {
    // normal return from setjmp setup
    BX_CPU_THIS_PTR errorno = 0;
    BX_CPU_THIS_PTR EXT = 1; // external event
    BX_CPU_THIS_PTR async_event = 1; // probably don't need this
    interrupt(vector, 0, 0, 0);
    }
}

  void
BX_CPU_C::dbg_take_dma(void)
{
  // NOTE: similar code in ::cpu_loop()
  if ( BX_HRQ ) {
    BX_CPU_THIS_PTR async_event = 1; // set in case INTR is triggered
    BX_RAISE_HLDA();
    }
}

#endif  // #if BX_DEBUGGER