/////////////////////////////////////////////////////////////////////////
// $Id$
/////////////////////////////////////////////////////////////////////////
//
// Copyright (C) 2001-2018 The Bochs Project
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 2 of the License, or (at your option) any later version.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
/////////////////////////////////////////////////////////////////////////

#define NEED_CPU_REG_SHORTCUTS 1
#include "bochs.h"
#include "cpu.h"

#define LOG_THIS BX_CPU_THIS_PTR

#include "memory/memory-bochs.h"
#include "pc_system.h"
#include "cpustats.h"

#if BX_SUPPORT_HANDLERS_CHAINING_SPEEDUPS

#define BX_SYNC_TIME_IF_SINGLE_PROCESSOR(allowed_delta) { \
  if (BX_SMP_PROCESSORS == 1) { \
    Bit32u delta = (Bit32u)(BX_CPU_THIS_PTR icount - BX_CPU_THIS_PTR icount_last_sync); \
    if (delta >= allowed_delta) { \
      BX_CPU_THIS_PTR sync_icount(); \
      BX_TICKN(delta); \
    } \
  } \
}

#else

#define BX_SYNC_TIME_IF_SINGLE_PROCESSOR(allowed_delta) \
  if (BX_SMP_PROCESSORS == 1) BX_TICK1()

#endif
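
// BX_SYNC_TIME_IF_SINGLE_PROCESSOR keeps virtual time in step with the
// executed instruction count on single-processor configurations: it ticks
// the PC system by the number of instructions retired since the last sync
// (batched when handler chaining is enabled, one tick per instruction
// otherwise). With more than one simulated processor the macro does nothing
// and time is assumed to be advanced elsewhere by the SMP scheduling code.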

jmp_buf BX_CPU_C::jmp_buf_env;

void BX_CPU_C::cpu_loop(void)
{
#if BX_DEBUGGER
  BX_CPU_THIS_PTR break_point = 0;
  BX_CPU_THIS_PTR magic_break = 0;
  BX_CPU_THIS_PTR stop_reason = STOP_NO_REASON;
#endif

  if (setjmp(BX_CPU_THIS_PTR jmp_buf_env)) {
    // can get here only from exception function or VMEXIT
    BX_CPU_THIS_PTR icount++;
    BX_SYNC_TIME_IF_SINGLE_PROCESSOR(0);
#if BX_DEBUGGER || BX_GDBSTUB
    if (dbg_instruction_epilog()) return;
#endif
#if BX_GDBSTUB
    if (bx_dbg.gdbstub_enabled) return;
#endif
  }

  // If the exception() routine has encountered a nasty fault scenario,
  // the debugger may request that control is returned to it so that
  // the situation may be examined.
#if BX_DEBUGGER
  if (bx_guard.interrupt_requested) return;
#endif

  // We get here either by a normal function call, or by a longjmp
  // back from an exception() call.  In either case, commit the
  // new EIP/ESP, and set up other environmental fields.  This code
  // mirrors similar code below, after the interrupt() call.
  BX_CPU_THIS_PTR prev_rip = RIP; // commit new EIP
  BX_CPU_THIS_PTR speculative_rsp = 0;

  while (1) {

    // check on events which occurred for previous instructions (traps)
    // and ones which are asynchronous to the CPU (hardware interrupts)
    if (BX_CPU_THIS_PTR async_event) {
      if (handleAsyncEvent()) {
        // If request to return to caller ASAP.
        return;
      }
    }

    bxICacheEntry_c *entry = getICacheEntry();
    bxInstruction_c *i = entry->i;

#if BX_SUPPORT_HANDLERS_CHAINING_SPEEDUPS
    for(;;) {
      // want to allow changing of the instruction inside instrumentation callback
      BX_INSTR_BEFORE_EXECUTION(BX_CPU_ID, i);
      RIP += i->ilen();
      // when handlers chaining is enabled this single call will execute entire trace
      BX_CPU_CALL_METHOD(i->execute1, (i)); // might iterate repeat instruction

      BX_SYNC_TIME_IF_SINGLE_PROCESSOR(0);

      if (BX_CPU_THIS_PTR async_event) break;

      i = getICacheEntry()->i;
    }
#else // BX_SUPPORT_HANDLERS_CHAINING_SPEEDUPS == 0

    bxInstruction_c *last = i + (entry->tlen);

    for(;;) {

#if BX_DEBUGGER
      if (BX_CPU_THIS_PTR trace)
        debug_disasm_instruction(BX_CPU_THIS_PTR prev_rip);
#endif

      // want to allow changing of the instruction inside instrumentation callback
      BX_INSTR_BEFORE_EXECUTION(BX_CPU_ID, i);
      RIP += i->ilen();
      BX_CPU_CALL_METHOD(i->execute1, (i)); // might iterate repeat instruction
      BX_CPU_THIS_PTR prev_rip = RIP; // commit new RIP
      BX_INSTR_AFTER_EXECUTION(BX_CPU_ID, i);
      BX_CPU_THIS_PTR icount++;

      BX_SYNC_TIME_IF_SINGLE_PROCESSOR(0);

      // note instructions generating exceptions never reach this point
#if BX_DEBUGGER || BX_GDBSTUB
      if (dbg_instruction_epilog()) return;
#endif

      if (BX_CPU_THIS_PTR async_event) break;

      if (++i == last) {
        entry = getICacheEntry();
        i = entry->i;
        last = i + (entry->tlen);
      }
    }
#endif

    // clear stop trace magic indication that probably was set by repeat or branch32/64
    BX_CPU_THIS_PTR async_event &= ~BX_ASYNC_EVENT_STOP_TRACE;

  }  // while (1)
}

#if BX_SUPPORT_SMP
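
// cpu_run_trace() is the SMP flavour of the execution loop: it executes at
// most one trace (stopping early on a pending async event) and then returns
// to its caller; the outer SMP scheduling loop is expected to invoke it for
// each simulated processor in turn.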

void BX_CPU_C::cpu_run_trace(void)
{
  // check on events which occurred for previous instructions (traps)
  // and ones which are asynchronous to the CPU (hardware interrupts)
  if (BX_CPU_THIS_PTR async_event) {
    if (handleAsyncEvent()) {
      // If request to return to caller ASAP.
      return;
    }
  }

  bxICacheEntry_c *entry = getICacheEntry();
  bxInstruction_c *i = entry->i;

#if BX_SUPPORT_HANDLERS_CHAINING_SPEEDUPS
  // want to allow changing of the instruction inside instrumentation callback
  BX_INSTR_BEFORE_EXECUTION(BX_CPU_ID, i);
  RIP += i->ilen();
  // when handlers chaining is enabled this single call will execute entire trace
  BX_CPU_CALL_METHOD(i->execute1, (i)); // might iterate repeat instruction

  if (BX_CPU_THIS_PTR async_event) {
    // clear stop trace magic indication that probably was set by repeat or branch32/64
    BX_CPU_THIS_PTR async_event &= ~BX_ASYNC_EVENT_STOP_TRACE;
  }
#else
  bxInstruction_c *last = i + (entry->tlen);

  for(;;) {
    // want to allow changing of the instruction inside instrumentation callback
    BX_INSTR_BEFORE_EXECUTION(BX_CPU_ID, i);
    RIP += i->ilen();
    BX_CPU_CALL_METHOD(i->execute1, (i)); // might iterate repeat instruction
    BX_CPU_THIS_PTR prev_rip = RIP; // commit new RIP
    BX_INSTR_AFTER_EXECUTION(BX_CPU_ID, i);
    BX_CPU_THIS_PTR icount++;

    if (BX_CPU_THIS_PTR async_event) {
      // clear stop trace magic indication that probably was set by repeat or branch32/64
      BX_CPU_THIS_PTR async_event &= ~BX_ASYNC_EVENT_STOP_TRACE;
      break;
    }

    if (++i == last) break;
  }
#endif // BX_SUPPORT_HANDLERS_CHAINING_SPEEDUPS
}

#endif

#include "decoder/ia_opcodes.h"

bxICacheEntry_c* BX_CPU_C::getICacheEntry(void)
{
  bx_address eipBiased = RIP + BX_CPU_THIS_PTR eipPageBias;

  if (eipBiased >= BX_CPU_THIS_PTR eipPageWindowSize) {
    prefetch();
    eipBiased = RIP + BX_CPU_THIS_PTR eipPageBias;
  }

  INC_ICACHE_STAT(iCacheLookups);

  bx_phy_address pAddr = BX_CPU_THIS_PTR pAddrFetchPage + eipBiased;
  bxICacheEntry_c *entry = BX_CPU_THIS_PTR iCache.find_entry(pAddr, BX_CPU_THIS_PTR fetchModeMask);

  if (entry == NULL)
  {
    // iCache miss. No validated instruction with matching fetch parameters
    // is in the iCache.
    INC_ICACHE_STAT(iCacheMisses);
    entry = serveICacheMiss((Bit32u) eipBiased, pAddr);
  }

#if BX_SUPPORT_CET
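  // With CET indirect branch tracking the CPU may still be waiting for an
  // ENDBRANCH after an indirect jump or call: the first instruction of the
  // returned trace is checked below, and unless legacy treatment suppresses
  // it, a #CP(ENDBRANCH) control-protection fault is raised when it is not
  // ENDBR32/ENDBR64 (or INT3).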
  if (WaitingForEndbranch(CPL)) {
    bxInstruction_c *i = entry->i;
    if (i->getIaOpcode() != (long64_mode() ? BX_IA_ENDBRANCH64 : BX_IA_ENDBRANCH32) && i->getIaOpcode() != BX_IA_INT3) {
      if (LegacyEndbranchTreatment(CPL)) {
        BX_ERROR(("Endbranch is expected for CPL=%d", CPL));
        exception(BX_CP_EXCEPTION, BX_CP_ENDBRANCH);
      }
    }
  }
#endif

  return entry;
}

#if BX_SUPPORT_HANDLERS_CHAINING_SPEEDUPS && BX_ENABLE_TRACE_LINKING

// The function is called after taken branch instructions and tries to link the branch to the next trace
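// The link is cached inside the branch instruction itself (setNextTrace) and
// is validated against iCache.traceLinkTimeStamp, presumably so that a flush
// of the trace cache invalidates all existing links at once; on a cached hit
// the next trace is entered directly, skipping the iCache lookup.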
void BX_CPP_AttrRegparmN(1) BX_CPU_C::linkTrace(bxInstruction_c *i)
{
#if BX_SUPPORT_SMP
  if (BX_SMP_PROCESSORS > 1)
    return;
#endif

#define BX_HANDLERS_CHAINING_MAX_DEPTH 1000

  // do not allow extreme trace link depth / avoid host stack overflow
  // (could happen with badly compiled instruction handlers)
  static Bit32u linkDepth = 0;

  if (BX_CPU_THIS_PTR async_event || ++linkDepth > BX_HANDLERS_CHAINING_MAX_DEPTH) {
    linkDepth = 0;
    return;
  }

  Bit32u delta = (Bit32u) (BX_CPU_THIS_PTR icount - BX_CPU_THIS_PTR icount_last_sync);
  if(delta >= bx_pc_system.getNumCpuTicksLeftNextEvent()) {
    linkDepth = 0;
    return;
  }

  bxInstruction_c *next = i->getNextTrace(BX_CPU_THIS_PTR iCache.traceLinkTimeStamp);
  if (next) {
    BX_EXECUTE_INSTRUCTION(next);
    return;
  }

  bx_address eipBiased = RIP + BX_CPU_THIS_PTR eipPageBias;
  if (eipBiased >= BX_CPU_THIS_PTR eipPageWindowSize) {
    prefetch();
    eipBiased = RIP + BX_CPU_THIS_PTR eipPageBias;
  }

  INC_ICACHE_STAT(iCacheLookups);

  bx_phy_address pAddr = BX_CPU_THIS_PTR pAddrFetchPage + eipBiased;
  bxICacheEntry_c *entry = BX_CPU_THIS_PTR iCache.find_entry(pAddr, BX_CPU_THIS_PTR fetchModeMask);

  if (entry != NULL) // link traces - handle only hit cases
  {
    i->setNextTrace(entry->i, BX_CPU_THIS_PTR iCache.traceLinkTimeStamp);
    i = entry->i;
    BX_EXECUTE_INSTRUCTION(i);
  }
}

#endif

#define BX_REPEAT_TIME_UPDATE_INTERVAL (BX_MAX_TRACE_LENGTH-1)
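
// repeat() and repeat_ZF() below iterate a REP-prefixed string instruction
// entirely inside a single call to the instruction handler. Virtual time is
// synced only every BX_REPEAT_TIME_UPDATE_INTERVAL iterations (roughly one
// trace worth of instructions) to keep the fast path cheap; the loop is left
// early whenever an async event becomes pending, restoring RIP so that the
// instruction is re-entered later and the remaining iterations still run.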

void BX_CPP_AttrRegparmN(2) BX_CPU_C::repeat(bxInstruction_c *i, BxRepIterationPtr_tR execute)
{
  // non repeated instruction
  if (! i->repUsedL()) {
    BX_CPU_CALL_REP_ITERATION(execute, (i));
    return;
  }

#if BX_X86_DEBUGGER
  BX_CPU_THIS_PTR in_repeat = 0;
#endif

#if BX_SUPPORT_X86_64
  if (i->as64L()) {
    while(1) {
      if (RCX != 0) {
        BX_CPU_CALL_REP_ITERATION(execute, (i));
        BX_INSTR_REPEAT_ITERATION(BX_CPU_ID, i);
        RCX --;
      }
      if (RCX == 0) return;

#if BX_DEBUGGER == 0
      if (BX_CPU_THIS_PTR async_event)
#endif
        break; // exit always if debugger enabled

      BX_CPU_THIS_PTR icount++;

      BX_SYNC_TIME_IF_SINGLE_PROCESSOR(BX_REPEAT_TIME_UPDATE_INTERVAL);
    }
  }
  else
#endif
  if (i->as32L()) {
    while(1) {
      if (ECX != 0) {
        BX_CPU_CALL_REP_ITERATION(execute, (i));
        BX_INSTR_REPEAT_ITERATION(BX_CPU_ID, i);
        RCX = ECX - 1;
      }
      if (ECX == 0) return;

#if BX_DEBUGGER == 0
      if (BX_CPU_THIS_PTR async_event)
#endif
        break; // exit always if debugger enabled

      BX_CPU_THIS_PTR icount++;

      BX_SYNC_TIME_IF_SINGLE_PROCESSOR(BX_REPEAT_TIME_UPDATE_INTERVAL);
    }
  }
  else // 16bit addrsize
  {
    while(1) {
      if (CX != 0) {
        BX_CPU_CALL_REP_ITERATION(execute, (i));
        BX_INSTR_REPEAT_ITERATION(BX_CPU_ID, i);
        CX --;
      }
      if (CX == 0) return;

#if BX_DEBUGGER == 0
      if (BX_CPU_THIS_PTR async_event)
#endif
        break; // exit always if debugger enabled

      BX_CPU_THIS_PTR icount++;

      BX_SYNC_TIME_IF_SINGLE_PROCESSOR(BX_REPEAT_TIME_UPDATE_INTERVAL);
    }
  }

#if BX_X86_DEBUGGER
  BX_CPU_THIS_PTR in_repeat = 1;
#endif

  RIP = BX_CPU_THIS_PTR prev_rip; // repeat loop not done, restore RIP

  // assert magic async_event to stop trace execution
  BX_CPU_THIS_PTR async_event |= BX_ASYNC_EVENT_STOP_TRACE;
}

void BX_CPP_AttrRegparmN(2) BX_CPU_C::repeat_ZF(bxInstruction_c *i, BxRepIterationPtr_tR execute)
{
  unsigned rep = i->lockRepUsedValue();

  // non repeated instruction
  if (rep < 2) {
    BX_CPU_CALL_REP_ITERATION(execute, (i));
    return;
  }

#if BX_X86_DEBUGGER
  BX_CPU_THIS_PTR in_repeat = 0;
#endif

  if (rep == 3) { /* repeat prefix 0xF3 */
#if BX_SUPPORT_X86_64
    if (i->as64L()) {
      while(1) {
        if (RCX != 0) {
          BX_CPU_CALL_REP_ITERATION(execute, (i));
          BX_INSTR_REPEAT_ITERATION(BX_CPU_ID, i);
          RCX --;
        }
        if (! get_ZF() || RCX == 0) return;

#if BX_DEBUGGER == 0
        if (BX_CPU_THIS_PTR async_event)
#endif
          break; // exit always if debugger enabled

        BX_CPU_THIS_PTR icount++;

        BX_SYNC_TIME_IF_SINGLE_PROCESSOR(BX_REPEAT_TIME_UPDATE_INTERVAL);
      }
    }
    else
#endif
    if (i->as32L()) {
      while(1) {
        if (ECX != 0) {
          BX_CPU_CALL_REP_ITERATION(execute, (i));
          BX_INSTR_REPEAT_ITERATION(BX_CPU_ID, i);
          RCX = ECX - 1;
        }
        if (! get_ZF() || ECX == 0) return;

#if BX_DEBUGGER == 0
        if (BX_CPU_THIS_PTR async_event)
#endif
          break; // exit always if debugger enabled

        BX_CPU_THIS_PTR icount++;

        BX_SYNC_TIME_IF_SINGLE_PROCESSOR(BX_REPEAT_TIME_UPDATE_INTERVAL);
      }
    }
    else // 16bit addrsize
    {
      while(1) {
        if (CX != 0) {
          BX_CPU_CALL_REP_ITERATION(execute, (i));
          BX_INSTR_REPEAT_ITERATION(BX_CPU_ID, i);
          CX --;
        }
        if (! get_ZF() || CX == 0) return;

#if BX_DEBUGGER == 0
        if (BX_CPU_THIS_PTR async_event)
#endif
          break; // exit always if debugger enabled

        BX_CPU_THIS_PTR icount++;

        BX_SYNC_TIME_IF_SINGLE_PROCESSOR(BX_REPEAT_TIME_UPDATE_INTERVAL);
      }
    }
  }
  else { /* repeat prefix 0xF2 */
#if BX_SUPPORT_X86_64
    if (i->as64L()) {
      while(1) {
        if (RCX != 0) {
          BX_CPU_CALL_REP_ITERATION(execute, (i));
          BX_INSTR_REPEAT_ITERATION(BX_CPU_ID, i);
          RCX --;
        }
        if (get_ZF() || RCX == 0) return;

#if BX_DEBUGGER == 0
        if (BX_CPU_THIS_PTR async_event)
#endif
          break; // exit always if debugger enabled

        BX_CPU_THIS_PTR icount++;

        BX_SYNC_TIME_IF_SINGLE_PROCESSOR(BX_REPEAT_TIME_UPDATE_INTERVAL);
      }
    }
    else
#endif
    if (i->as32L()) {
      while(1) {
        if (ECX != 0) {
          BX_CPU_CALL_REP_ITERATION(execute, (i));
          BX_INSTR_REPEAT_ITERATION(BX_CPU_ID, i);
          RCX = ECX - 1;
        }
        if (get_ZF() || ECX == 0) return;

#if BX_DEBUGGER == 0
        if (BX_CPU_THIS_PTR async_event)
#endif
          break; // exit always if debugger enabled

        BX_CPU_THIS_PTR icount++;

        BX_SYNC_TIME_IF_SINGLE_PROCESSOR(BX_REPEAT_TIME_UPDATE_INTERVAL);
      }
    }
    else // 16bit addrsize
    {
      while(1) {
        if (CX != 0) {
          BX_CPU_CALL_REP_ITERATION(execute, (i));
          BX_INSTR_REPEAT_ITERATION(BX_CPU_ID, i);
          CX --;
        }
        if (get_ZF() || CX == 0) return;

#if BX_DEBUGGER == 0
        if (BX_CPU_THIS_PTR async_event)
#endif
          break; // exit always if debugger enabled

        BX_CPU_THIS_PTR icount++;

        BX_SYNC_TIME_IF_SINGLE_PROCESSOR(BX_REPEAT_TIME_UPDATE_INTERVAL);
      }
    }
  }

#if BX_X86_DEBUGGER
  BX_CPU_THIS_PTR in_repeat = 1;
#endif

  RIP = BX_CPU_THIS_PTR prev_rip; // repeat loop not done, restore RIP

  // assert magic async_event to stop trace execution
  BX_CPU_THIS_PTR async_event |= BX_ASYNC_EVENT_STOP_TRACE;
}

// boundaries of consideration:
//
//  * physical memory boundary: 1024k (1Megabyte) (increments of...)
//  * A20 boundary:             1024k (1Megabyte)
//  * page boundary:            4k
//  * ROM boundary:             2k (don't care since we are only reading)
//  * segment boundary:         any
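//
// Illustrative example (hypothetical numbers): with EIP = 0x00401234 and
// CS.base = 0 the linear address is 0x00401234, pageOffset = 0x234 and
// eipPageBias = 0x234 - 0x00401234 = -0x00401000.  getICacheEntry() then
// computes eipBiased = RIP + eipPageBias = 0x234, i.e. the offset into the
// current 4K fetch window, and a new prefetch() is triggered only once
// eipBiased reaches eipPageWindowSize (4096, or less when CS.limit ends
// inside the page).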

void BX_CPU_C::prefetch(void)
{
  bx_address laddr;
  unsigned pageOffset;

  INC_ICACHE_STAT(iCachePrefetch);

#if BX_SUPPORT_X86_64
  if (long64_mode()) {
    if (! IsCanonical(RIP)) {
      BX_ERROR(("prefetch: #GP(0): RIP crossed canonical boundary"));
      exception(BX_GP_EXCEPTION, 0);
    }

    // linear address is equal to RIP in 64-bit long mode
    pageOffset = PAGE_OFFSET(EIP);
    laddr = RIP;

    // Calculate RIP at the beginning of the page.
    BX_CPU_THIS_PTR eipPageBias = pageOffset - RIP;
    BX_CPU_THIS_PTR eipPageWindowSize = 4096;
  }
  else
#endif
  {

#if BX_CPU_LEVEL >= 5
    if (USER_PL && BX_CPU_THIS_PTR get_VIP() && BX_CPU_THIS_PTR get_VIF()) {
      if (BX_CPU_THIS_PTR cr4.get_PVI() | (v8086_mode() && BX_CPU_THIS_PTR cr4.get_VME())) {
        BX_ERROR(("prefetch: inconsistent VME state"));
        exception(BX_GP_EXCEPTION, 0);
      }
    }
#endif

    BX_CLEAR_64BIT_HIGH(BX_64BIT_REG_RIP); /* avoid 32-bit EIP wrap */
    laddr = get_laddr32(BX_SEG_REG_CS, EIP);
    pageOffset = PAGE_OFFSET(laddr);

    // Calculate RIP at the beginning of the page.
    BX_CPU_THIS_PTR eipPageBias = (bx_address) pageOffset - EIP;

    Bit32u limit = BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled;
    if (EIP > limit) {
      BX_ERROR(("prefetch: EIP [%08x] > CS.limit [%08x]", EIP, limit));
      exception(BX_GP_EXCEPTION, 0);
    }

    BX_CPU_THIS_PTR eipPageWindowSize = 4096;
    if (limit + BX_CPU_THIS_PTR eipPageBias < 4096) {
      BX_CPU_THIS_PTR eipPageWindowSize = (Bit32u)(limit + BX_CPU_THIS_PTR eipPageBias + 1);
    }
  }

#if BX_X86_DEBUGGER
  if (hwbreakpoint_check(laddr, BX_HWDebugInstruction, BX_HWDebugInstruction)) {
    signal_event(BX_EVENT_CODE_BREAKPOINT_ASSIST);
    if (! interrupts_inhibited(BX_INHIBIT_DEBUG)) {
      // The next instruction could already hit a code breakpoint but
      // async_event won't take effect immediately.
      // Check if the next executing instruction hits code breakpoint

      // check only if not fetching page cross instruction
      // this check is 32-bit wrap safe as well
      if (EIP == (Bit32u) BX_CPU_THIS_PTR prev_rip) {
        Bit32u dr6_bits = code_breakpoint_match(laddr);
        if (dr6_bits & BX_DEBUG_TRAP_HIT) {
          BX_ERROR(("#DB: x86 code breakpoint caught"));
          BX_CPU_THIS_PTR debug_trap |= dr6_bits;
          exception(BX_DB_EXCEPTION, 0);
        }
      }
    }
  }
  else {
    clear_event(BX_EVENT_CODE_BREAKPOINT_ASSIST);
  }
#endif

  BX_CPU_THIS_PTR clear_RF();

  bx_address lpf = LPFOf(laddr);
  bx_TLB_entry *tlbEntry = BX_ITLB_ENTRY_OF(laddr);
  Bit8u *fetchPtr = 0;

  if ((tlbEntry->lpf == lpf) && (tlbEntry->accessBits & (1 << unsigned(USER_PL))) != 0) {
    BX_CPU_THIS_PTR pAddrFetchPage = tlbEntry->ppf;
    fetchPtr = (Bit8u*) tlbEntry->hostPageAddr;
  }
  else {
    bx_phy_address pAddr = translate_linear(tlbEntry, laddr, USER_PL, BX_EXECUTE);
    BX_CPU_THIS_PTR pAddrFetchPage = PPFOf(pAddr);
  }

  if (fetchPtr) {
    BX_CPU_THIS_PTR eipFetchPtr = fetchPtr;
  }
  else {
    BX_CPU_THIS_PTR eipFetchPtr = (const Bit8u*) getHostMemAddr(BX_CPU_THIS_PTR pAddrFetchPage, BX_EXECUTE);

    // Sanity checks
    if (! BX_CPU_THIS_PTR eipFetchPtr) {
      bx_phy_address pAddr = BX_CPU_THIS_PTR pAddrFetchPage + pageOffset;
      if (pAddr >= BX_MEM(0)->get_memory_len()) {
        BX_PANIC(("prefetch: running in bogus memory, pAddr=0x" FMT_PHY_ADDRX, pAddr));
      }
      else {
        BX_PANIC(("prefetch: getHostMemAddr vetoed direct read, pAddr=0x" FMT_PHY_ADDRX, pAddr));
      }
    }
  }
}

#if BX_DEBUGGER || BX_GDBSTUB
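
// dbg_instruction_epilog() is called after every completed instruction when
// the internal debugger or GDB stub is compiled in.  It records the current
// CS:EIP / linear address in guard_found and returns true whenever control
// should be handed back to the debugger: a read/write/time or magic
// breakpoint was hit, an icount guard or Ctrl-C request is pending, an
// instruction-address (virtual, linear or physical) breakpoint matches, or
// the GDB stub reports a stop reason.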
bool BX_CPU_C::dbg_instruction_epilog(void)
{
#if BX_DEBUGGER
  bx_address debug_eip = RIP;
  Bit16u cs = BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value;

  BX_CPU_THIS_PTR guard_found.cs  = cs;
  BX_CPU_THIS_PTR guard_found.eip = debug_eip;
  BX_CPU_THIS_PTR guard_found.laddr = get_laddr(BX_SEG_REG_CS, debug_eip);
  BX_CPU_THIS_PTR guard_found.code_32_64 = BX_CPU_THIS_PTR fetchModeMask;

  //
  // Take care of break point conditions generated during instruction execution
  //

  // Check if we hit read/write or time breakpoint
  if (BX_CPU_THIS_PTR break_point) {
    Bit64u tt = bx_pc_system.time_ticks();
    switch (BX_CPU_THIS_PTR break_point) {
    case BREAK_POINT_TIME:
      BX_INFO(("[" FMT_LL "d] Caught time breakpoint", tt));
      BX_CPU_THIS_PTR stop_reason = STOP_TIME_BREAK_POINT;
      return(1); // on a breakpoint
    case BREAK_POINT_READ:
      BX_INFO(("[" FMT_LL "d] Caught read watch point", tt));
      BX_CPU_THIS_PTR stop_reason = STOP_READ_WATCH_POINT;
      return(1); // on a breakpoint
    case BREAK_POINT_WRITE:
      BX_INFO(("[" FMT_LL "d] Caught write watch point", tt));
      BX_CPU_THIS_PTR stop_reason = STOP_WRITE_WATCH_POINT;
      return(1); // on a breakpoint
    default:
      BX_PANIC(("Weird break point condition"));
    }
  }

  if (BX_CPU_THIS_PTR magic_break) {
    BX_INFO(("[" FMT_LL "d] Stopped on MAGIC BREAKPOINT", bx_pc_system.time_ticks()));
    BX_CPU_THIS_PTR stop_reason = STOP_MAGIC_BREAK_POINT;
    return(1); // on a breakpoint
  }

  // see if debugger requesting icount guard
  if (bx_guard.guard_for & BX_DBG_GUARD_ICOUNT) {
    if (get_icount() >= BX_CPU_THIS_PTR guard_found.icount_max) {
      return(1);
    }
  }

  // convenient point to see if user requested debug break or typed Ctrl-C
  if (bx_guard.interrupt_requested) {
    return(1);
  }

  // support for 'show' command in debugger
  extern unsigned dbg_show_mask;
  if(dbg_show_mask) {
    int rv = bx_dbg_show_symbolic();
    if (rv) return(rv);
  }

  // Just committed an instruction, before fetching a new one
  // see if debugger is looking for iaddr breakpoint of any type
  if (bx_guard.guard_for & BX_DBG_GUARD_IADDR_ALL) {
#if (BX_DBG_MAX_VIR_BPOINTS > 0)
    if (bx_guard.guard_for & BX_DBG_GUARD_IADDR_VIR) {
      for (unsigned n=0; n<bx_guard.iaddr.num_virtual; n++) {
        if (bx_guard.iaddr.vir[n].enabled &&
           (bx_guard.iaddr.vir[n].cs  == cs) &&
           (bx_guard.iaddr.vir[n].eip == debug_eip))
        {
          if (! bx_guard.iaddr.vir[n].condition || bx_dbg_eval_condition(bx_guard.iaddr.vir[n].condition)) {
            BX_CPU_THIS_PTR guard_found.guard_found = BX_DBG_GUARD_IADDR_VIR;
            BX_CPU_THIS_PTR guard_found.iaddr_index = n;
            return(1); // on a breakpoint
          }
        }
      }
    }
#endif
#if (BX_DBG_MAX_LIN_BPOINTS > 0)
    if (bx_guard.guard_for & BX_DBG_GUARD_IADDR_LIN) {
      for (unsigned n=0; n<bx_guard.iaddr.num_linear; n++) {
        if (bx_guard.iaddr.lin[n].enabled &&
           (bx_guard.iaddr.lin[n].addr == BX_CPU_THIS_PTR guard_found.laddr))
        {
          if (! bx_guard.iaddr.lin[n].condition || bx_dbg_eval_condition(bx_guard.iaddr.lin[n].condition)) {
            BX_CPU_THIS_PTR guard_found.guard_found = BX_DBG_GUARD_IADDR_LIN;
            BX_CPU_THIS_PTR guard_found.iaddr_index = n;
            return(1); // on a breakpoint
          }
        }
      }
    }
#endif
#if (BX_DBG_MAX_PHY_BPOINTS > 0)
    if (bx_guard.guard_for & BX_DBG_GUARD_IADDR_PHY) {
      bx_phy_address phy;
      bool valid = dbg_xlate_linear2phy(BX_CPU_THIS_PTR guard_found.laddr, &phy);
      if (valid) {
        for (unsigned n=0; n<bx_guard.iaddr.num_physical; n++) {
          if (bx_guard.iaddr.phy[n].enabled && (bx_guard.iaddr.phy[n].addr == phy))
          {
            if (! bx_guard.iaddr.phy[n].condition || bx_dbg_eval_condition(bx_guard.iaddr.phy[n].condition)) {
              BX_CPU_THIS_PTR guard_found.guard_found = BX_DBG_GUARD_IADDR_PHY;
              BX_CPU_THIS_PTR guard_found.iaddr_index = n;
              return(1); // on a breakpoint
            }
          }
        }
      }
    }
#endif
  }
#endif

#if BX_GDBSTUB
  if (bx_dbg.gdbstub_enabled) {
    unsigned reason = bx_gdbstub_check(EIP);
    if (reason != GDBSTUB_STOP_NO_REASON) return(1);
  }
#endif

  return(0);
}
#endif // BX_DEBUGGER || BX_GDBSTUB