2001-10-03 17:10:38 +04:00
|
|
|
/////////////////////////////////////////////////////////////////////////
|
2011-02-25 00:54:04 +03:00
|
|
|
// $Id$
|
2001-10-03 17:10:38 +04:00
|
|
|
/////////////////////////////////////////////////////////////////////////
|
|
|
|
//
|
2012-08-05 17:52:40 +04:00
|
|
|
// Copyright (C) 2001-2012 The Bochs Project
|
2001-04-10 05:04:59 +04:00
|
|
|
//
|
|
|
|
// This library is free software; you can redistribute it and/or
|
|
|
|
// modify it under the terms of the GNU Lesser General Public
|
|
|
|
// License as published by the Free Software Foundation; either
|
|
|
|
// version 2 of the License, or (at your option) any later version.
|
|
|
|
//
|
|
|
|
// This library is distributed in the hope that it will be useful,
|
|
|
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
|
|
|
// Lesser General Public License for more details.
|
|
|
|
//
|
|
|
|
// You should have received a copy of the GNU Lesser General Public
|
|
|
|
// License along with this library; if not, write to the Free Software
|
2009-01-16 21:18:59 +03:00
|
|
|
// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
|
2007-11-17 21:08:46 +03:00
|
|
|
/////////////////////////////////////////////////////////////////////////
|
2001-04-10 05:04:59 +04:00
|
|
|
|
2001-05-24 22:46:34 +04:00
|
|
|
#define NEED_CPU_REG_SHORTCUTS 1
|
2001-04-10 05:04:59 +04:00
|
|
|
#include "bochs.h"
|
2006-03-07 01:03:16 +03:00
|
|
|
#include "cpu.h"
|
merge in BRANCH-io-cleanup.
To see the commit logs for this use either cvsweb or
cvs update -r BRANCH-io-cleanup and then 'cvs log' the various files.
In general this provides a generic interface for logging.
logfunctions:: is a class that is inherited by some classes, and also
. allocated as a standalone global called 'genlog'. All logging uses
. one of the ::info(), ::error(), ::ldebug(), ::panic() methods of this
. class through 'BX_INFO(), BX_ERROR(), BX_DEBUG(), BX_PANIC()' macros
. respectively.
.
. An example usage:
. BX_INFO(("Hello, World!\n"));
iofunctions:: is a class that is allocated once by default, and assigned
as the iofunction of each logfunctions instance. It is this class that
maintains the file descriptor and other output related code, at this
point using vfprintf(). At some future point, someone may choose to
write a gui 'console' for bochs to which messages would be redirected
simply by assigning a different iofunction class to the various logfunctions
objects.
More cleanup is coming, but this works for now. If you want to see alot
of debugging output, in main.cc, change onoff[LOGLEV_DEBUG]=0 to =1.
Comments, bugs, flames, to me: todd@fries.net
2001-05-15 18:49:57 +04:00
|
|
|
#define LOG_THIS BX_CPU_THIS_PTR
|
2001-04-10 05:04:59 +04:00
|
|
|
|
2008-06-22 07:45:55 +04:00
|
|
|
// Perform a near branch to a 16-bit offset within the current code segment.
// Raises #GP(0) if the target offset lies outside the CS segment limit.
// Not valid in 64-bit mode (near branches there use branch_near64).
BX_CPP_INLINE void BX_CPP_AttrRegparmN(1) BX_CPU_C::branch_near16(Bit16u new_IP)
{
  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);

  // check always, not only in protected mode
  if (new_IP > BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled)
  {
    BX_ERROR(("branch_near16: offset outside of CS limits"));
    exception(BX_GP_EXCEPTION, 0);
  }

  EIP = new_IP;

#if BX_SUPPORT_HANDLERS_CHAINING_SPEEDUPS == 0
  // assert magic async_event to stop trace execution
  BX_CPU_THIS_PTR async_event |= BX_ASYNC_EVENT_STOP_TRACE;
#endif
}
|
|
|
|
|
2011-07-07 00:01:18 +04:00
|
|
|
// RET imm16 (near return, 16-bit operand size): pop the return IP from the
// stack, then release imm16 additional bytes of stack (caller's parameters).
// Raises #GP(0) if the popped IP exceeds the CS segment limit.
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::RETnear16_Iw(bxInstruction_c *i)
{
  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);

#if BX_DEBUGGER
  BX_CPU_THIS_PTR show_flag |= Flag_ret;
#endif

  // speculatively update RSP; rolled back if an exception is taken
  RSP_SPECULATIVE;

  Bit16u return_IP = pop_16();

  if (return_IP > BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled)
  {
    BX_ERROR(("RETnear16_Iw: IP > limit"));
    exception(BX_GP_EXCEPTION, 0);
  }

  EIP = return_IP;

  Bit16u imm16 = i->Iw();

  // stack pointer width depends on SS.B (32-bit vs 16-bit stack)
  if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.d_b) /* 32bit stack */
    ESP += imm16;
  else
    SP += imm16;

  RSP_COMMIT;

  BX_INSTR_UCNEAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_RET, PREV_RIP, EIP);

  BX_NEXT_TRACE(i);
}
|
|
|
|
|
2011-07-07 00:01:18 +04:00
|
|
|
// RET (near return, 16-bit operand size, no immediate): pop the return IP
// from the stack and branch to it. Raises #GP(0) if the popped IP exceeds
// the CS segment limit.
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::RETnear16(bxInstruction_c *i)
{
  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);

#if BX_DEBUGGER
  BX_CPU_THIS_PTR show_flag |= Flag_ret;
#endif

  // speculatively update RSP; rolled back if an exception is taken
  RSP_SPECULATIVE;

  Bit16u return_IP = pop_16();

  if (return_IP > BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled)
  {
    BX_ERROR(("RETnear16: IP > limit"));
    exception(BX_GP_EXCEPTION, 0);
  }

  EIP = return_IP;

  RSP_COMMIT;

  BX_INSTR_UCNEAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_RET, PREV_RIP, EIP);

  BX_NEXT_TRACE(i);
}
|
|
|
|
|
2011-07-07 00:01:18 +04:00
|
|
|
// RETF imm16 (far return, 16-bit operand size): pop IP and CS from the
// stack, then release imm16 additional bytes of stack. In protected mode
// the heavy lifting (privilege checks, stack switch) is delegated to
// return_protected(). In real/v8086 mode the popped IP is checked against
// the CS limit (#GP(0) on overflow) before CS is reloaded.
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::RETfar16_Iw(bxInstruction_c *i)
{
  Bit16u ip, cs_raw;

  // CS is reloaded below, so the prefetch queue must be invalidated
  invalidate_prefetch_q();

#if BX_DEBUGGER
  BX_CPU_THIS_PTR show_flag |= Flag_ret;
#endif

  Bit16s imm16 = (Bit16s) i->Iw();

  if (protected_mode()) {
    return_protected(i, imm16);
    goto done;
  }

  // speculatively update RSP; rolled back if an exception is taken
  RSP_SPECULATIVE;

  ip = pop_16();
  cs_raw = pop_16();

  // CS.LIMIT can't change when in real/v8086 mode
  if (ip > BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled) {
    BX_ERROR(("RETfar16_Iw: instruction pointer not within code segment limits"));
    exception(BX_GP_EXCEPTION, 0);
  }

  load_seg_reg(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS], cs_raw);
  EIP = (Bit32u) ip;

  // stack pointer width depends on SS.B (32-bit vs 16-bit stack)
  if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.d_b)
    ESP += imm16;
  else
    SP += imm16;

  RSP_COMMIT;

done:

  BX_INSTR_FAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_RET,
                      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value, EIP);

  BX_NEXT_TRACE(i);
}
|
|
|
|
|
2011-07-07 00:01:18 +04:00
|
|
|
// CALL rel16 (near relative call, 16-bit operand size): push the IP of the
// next instruction, then branch to IP + signed displacement. The CS-limit
// check (#GP(0)) is performed inside branch_near16().
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::CALL_Jw(bxInstruction_c *i)
{
#if BX_DEBUGGER
  BX_CPU_THIS_PTR show_flag |= Flag_call;
#endif

  // speculatively update RSP; rolled back if an exception is taken
  RSP_SPECULATIVE;

  /* push 16 bit EA of next instruction */
  push_16(IP);

  // 16-bit wrap-around of IP + displacement is the architected behavior
  Bit16u new_IP = IP + i->Iw();
  branch_near16(new_IP);

  RSP_COMMIT;

  BX_INSTR_UCNEAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_CALL, PREV_RIP, EIP);

  BX_LINK_TRACE(i);
}
|
|
|
|
|
2011-07-07 00:01:18 +04:00
|
|
|
// CALL ptr16:16 (direct far call, 16-bit operand size): target CS:IP is
// encoded in the instruction. Protected mode delegates to call_protected()
// (gate/privilege handling). In real/v8086 mode: push return CS and IP,
// check the target offset against the CS limit (#GP(0) on overflow), then
// reload CS and set EIP.
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::CALL16_Ap(bxInstruction_c *i)
{
  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);

  // CS is reloaded below, so the prefetch queue must be invalidated
  invalidate_prefetch_q();

#if BX_DEBUGGER
  BX_CPU_THIS_PTR show_flag |= Flag_call;
#endif

  Bit16u disp16 = i->Iw();
  Bit16u cs_raw = i->Iw2();

  // speculatively update RSP; rolled back if an exception is taken
  RSP_SPECULATIVE;

  if (protected_mode()) {
    call_protected(i, cs_raw, disp16);
    goto done;
  }

  push_16(BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value);
  push_16(IP);

  // CS.LIMIT can't change when in real/v8086 mode
  if (disp16 > BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled) {
    BX_ERROR(("CALL16_Ap: instruction pointer not within code segment limits"));
    exception(BX_GP_EXCEPTION, 0);
  }

  load_seg_reg(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS], cs_raw);
  EIP = (Bit32u) disp16;

done:
  RSP_COMMIT;

  BX_INSTR_FAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_CALL,
                      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value, EIP);

  BX_NEXT_TRACE(i);
}
|
|
|
|
|
2011-07-07 00:01:18 +04:00
|
|
|
// CALL r16 (near indirect call through a register, 16-bit operand size):
// push the IP of the next instruction, then branch to the 16-bit target
// held in the register operand. The CS-limit check (#GP(0)) is performed
// inside branch_near16().
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::CALL_EwR(bxInstruction_c *i)
{
  Bit16u new_IP = BX_READ_16BIT_REG(i->dst());

#if BX_DEBUGGER
  BX_CPU_THIS_PTR show_flag |= Flag_call;
#endif

  // speculatively update RSP; rolled back if an exception is taken
  RSP_SPECULATIVE;

  /* push 16 bit EA of next instruction */
  push_16(IP);

  branch_near16(new_IP);

  RSP_COMMIT;

  BX_INSTR_UCNEAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_CALL_INDIRECT, PREV_RIP, EIP);

  BX_NEXT_TRACE(i);
}
|
|
|
|
|
2011-07-07 00:01:18 +04:00
|
|
|
// CALL m16:16 (indirect far call through memory, 16-bit operand size):
// read the target IP and CS selector from the memory operand (IP at eaddr,
// selector at eaddr+2, wrapped by the address-size mask). Protected mode
// delegates to call_protected(). In real/v8086 mode: push return CS and
// IP, check the target offset against the CS limit (#GP(0) on overflow),
// then reload CS and set EIP.
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::CALL16_Ep(bxInstruction_c *i)
{
  Bit16u cs_raw;
  Bit16u op1_16;

  // CS is reloaded below, so the prefetch queue must be invalidated
  invalidate_prefetch_q();

#if BX_DEBUGGER
  BX_CPU_THIS_PTR show_flag |= Flag_call;
#endif

  bx_address eaddr = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));

  // memory reads happen before any stack update, so a fault here leaves RSP intact
  op1_16 = read_virtual_word(i->seg(), eaddr);
  cs_raw = read_virtual_word(i->seg(), (eaddr+2) & i->asize_mask());

  // speculatively update RSP; rolled back if an exception is taken
  RSP_SPECULATIVE;

  if (protected_mode()) {
    call_protected(i, cs_raw, op1_16);
    goto done;
  }

  push_16(BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value);
  push_16(IP);

  // CS.LIMIT can't change when in real/v8086 mode
  if (op1_16 > BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled) {
    BX_ERROR(("CALL16_Ep: instruction pointer not within code segment limits"));
    exception(BX_GP_EXCEPTION, 0);
  }

  load_seg_reg(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS], cs_raw);
  EIP = op1_16;

done:
  RSP_COMMIT;

  BX_INSTR_FAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_CALL_INDIRECT,
                      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value, EIP);

  BX_NEXT_TRACE(i);
}
|
|
|
|
|
2011-07-07 00:01:18 +04:00
|
|
|
// JMP rel16: unconditional near jump, 16-bit operand size.
// New IP is current IP plus sign-extended displacement, truncated to 16 bit.
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::JMP_Jw(bxInstruction_c *i)
{
  Bit16u new_IP = IP + i->Iw();

  // branch_near16 validates the target against CS.LIMIT and commits RIP
  branch_near16(new_IP);
  BX_INSTR_UCNEAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_JMP, PREV_RIP, new_IP);

  BX_LINK_TRACE(i);
}
|
|
|
|
|
2011-07-07 00:01:18 +04:00
|
|
|
// JO rel16: near conditional jump, taken when OF=1.
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::JO_Jw(bxInstruction_c *i)
{
  if (get_OF()) {
    Bit16u new_IP = IP + i->Iw();
    branch_near16(new_IP);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, new_IP);
    BX_LINK_TRACE(i);
  }

  BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
  BX_NEXT_INSTR(i); // trace can continue over non-taken branch
}
|
|
|
|
|
2011-07-07 00:01:18 +04:00
|
|
|
// JNO rel16: near conditional jump, taken when OF=0.
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::JNO_Jw(bxInstruction_c *i)
{
  if (! get_OF()) {
    Bit16u new_IP = IP + i->Iw();
    branch_near16(new_IP);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, new_IP);
    BX_LINK_TRACE(i);
  }

  BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
  BX_NEXT_INSTR(i); // trace can continue over non-taken branch
}
|
|
|
|
|
2011-07-07 00:01:18 +04:00
|
|
|
// JB rel16: near conditional jump, taken when CF=1.
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::JB_Jw(bxInstruction_c *i)
{
  if (get_CF()) {
    Bit16u new_IP = IP + i->Iw();
    branch_near16(new_IP);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, new_IP);
    BX_LINK_TRACE(i);
  }

  BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
  BX_NEXT_INSTR(i); // trace can continue over non-taken branch
}
|
|
|
|
|
2011-07-07 00:01:18 +04:00
|
|
|
// JNB rel16: near conditional jump, taken when CF=0.
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::JNB_Jw(bxInstruction_c *i)
{
  if (! get_CF()) {
    Bit16u new_IP = IP + i->Iw();
    branch_near16(new_IP);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, new_IP);
    BX_LINK_TRACE(i);
  }

  BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
  BX_NEXT_INSTR(i); // trace can continue over non-taken branch
}
|
|
|
|
|
2011-07-07 00:01:18 +04:00
|
|
|
// JZ rel16: near conditional jump, taken when ZF=1.
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::JZ_Jw(bxInstruction_c *i)
{
  if (get_ZF()) {
    Bit16u new_IP = IP + i->Iw();
    branch_near16(new_IP);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, new_IP);
    BX_LINK_TRACE(i);
  }

  BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
  BX_NEXT_INSTR(i); // trace can continue over non-taken branch
}
|
|
|
|
|
2011-07-07 00:01:18 +04:00
|
|
|
// JNZ rel16: near conditional jump, taken when ZF=0.
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::JNZ_Jw(bxInstruction_c *i)
{
  if (! get_ZF()) {
    Bit16u new_IP = IP + i->Iw();
    branch_near16(new_IP);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, new_IP);
    BX_LINK_TRACE(i);
  }

  BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
  BX_NEXT_INSTR(i); // trace can continue over non-taken branch
}
|
|
|
|
|
2011-07-07 00:01:18 +04:00
|
|
|
// JBE rel16: near conditional jump, taken when CF=1 or ZF=1.
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::JBE_Jw(bxInstruction_c *i)
{
  if (get_CF() || get_ZF()) {
    Bit16u new_IP = IP + i->Iw();
    branch_near16(new_IP);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, new_IP);
    BX_LINK_TRACE(i);
  }

  BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
  BX_NEXT_INSTR(i); // trace can continue over non-taken branch
}
|
|
|
|
|
2011-07-07 00:01:18 +04:00
|
|
|
// JNBE rel16: near conditional jump, taken when CF=0 and ZF=0.
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::JNBE_Jw(bxInstruction_c *i)
{
  if (! (get_CF() || get_ZF())) {
    Bit16u new_IP = IP + i->Iw();
    branch_near16(new_IP);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, new_IP);
    BX_LINK_TRACE(i);
  }

  BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
  BX_NEXT_INSTR(i); // trace can continue over non-taken branch
}
|
|
|
|
|
2011-07-07 00:01:18 +04:00
|
|
|
// JS rel16: near conditional jump, taken when SF=1.
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::JS_Jw(bxInstruction_c *i)
{
  if (get_SF()) {
    Bit16u new_IP = IP + i->Iw();
    branch_near16(new_IP);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, new_IP);
    BX_LINK_TRACE(i);
  }

  BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
  BX_NEXT_INSTR(i); // trace can continue over non-taken branch
}
|
|
|
|
|
2011-07-07 00:01:18 +04:00
|
|
|
// JNS rel16: near conditional jump, taken when SF=0.
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::JNS_Jw(bxInstruction_c *i)
{
  if (! get_SF()) {
    Bit16u new_IP = IP + i->Iw();
    branch_near16(new_IP);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, new_IP);
    BX_LINK_TRACE(i);
  }

  BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
  BX_NEXT_INSTR(i); // trace can continue over non-taken branch
}
|
|
|
|
|
2011-07-07 00:01:18 +04:00
|
|
|
// JP rel16: near conditional jump, taken when PF=1.
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::JP_Jw(bxInstruction_c *i)
{
  if (get_PF()) {
    Bit16u new_IP = IP + i->Iw();
    branch_near16(new_IP);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, new_IP);
    BX_LINK_TRACE(i);
  }

  BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
  BX_NEXT_INSTR(i); // trace can continue over non-taken branch
}
|
|
|
|
|
2011-07-07 00:01:18 +04:00
|
|
|
// JNP rel16: near conditional jump, taken when PF=0.
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::JNP_Jw(bxInstruction_c *i)
{
  if (! get_PF()) {
    Bit16u new_IP = IP + i->Iw();
    branch_near16(new_IP);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, new_IP);
    BX_LINK_TRACE(i);
  }

  BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
  BX_NEXT_INSTR(i); // trace can continue over non-taken branch
}
|
|
|
|
|
2011-07-07 00:01:18 +04:00
|
|
|
// JL rel16: near conditional jump, taken when SF != OF (signed less).
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::JL_Jw(bxInstruction_c *i)
{
  if (getB_SF() != getB_OF()) {
    Bit16u new_IP = IP + i->Iw();
    branch_near16(new_IP);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, new_IP);
    BX_LINK_TRACE(i);
  }

  BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
  BX_NEXT_INSTR(i); // trace can continue over non-taken branch
}
|
|
|
|
|
2011-07-07 00:01:18 +04:00
|
|
|
// JNL rel16: near conditional jump, taken when SF == OF (signed not-less).
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::JNL_Jw(bxInstruction_c *i)
{
  if (getB_SF() == getB_OF()) {
    Bit16u new_IP = IP + i->Iw();
    branch_near16(new_IP);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, new_IP);
    BX_LINK_TRACE(i);
  }

  BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
  BX_NEXT_INSTR(i); // trace can continue over non-taken branch
}
|
|
|
|
|
2011-07-07 00:01:18 +04:00
|
|
|
// JLE rel16: near conditional jump, taken when ZF=1 or SF != OF (signed less-or-equal).
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::JLE_Jw(bxInstruction_c *i)
{
  if (get_ZF() || (getB_SF() != getB_OF())) {
    Bit16u new_IP = IP + i->Iw();
    branch_near16(new_IP);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, new_IP);
    BX_LINK_TRACE(i);
  }

  BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
  BX_NEXT_INSTR(i); // trace can continue over non-taken branch
}
|
|
|
|
|
2011-07-07 00:01:18 +04:00
|
|
|
// JNLE rel16: near conditional jump, taken when ZF=0 and SF == OF (signed greater).
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::JNLE_Jw(bxInstruction_c *i)
{
  if (! get_ZF() && (getB_SF() == getB_OF())) {
    Bit16u new_IP = IP + i->Iw();
    branch_near16(new_IP);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, new_IP);
    BX_LINK_TRACE(i);
  }

  BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
  BX_NEXT_INSTR(i); // trace can continue over non-taken branch
}
|
2001-04-10 05:04:59 +04:00
|
|
|
|
2011-07-07 00:01:18 +04:00
|
|
|
// JMP r/m16 (register form): unconditional near indirect jump to the
// 16-bit value held in the destination register.
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::JMP_EwR(bxInstruction_c *i)
{
  Bit16u new_IP = BX_READ_16BIT_REG(i->dst());
  branch_near16(new_IP);
  BX_INSTR_UCNEAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_JMP_INDIRECT, PREV_RIP, new_IP);

  BX_NEXT_TRACE(i);
}
|
|
|
|
|
2007-11-12 21:20:15 +03:00
|
|
|
/* Far indirect jump */
|
2011-07-07 00:01:18 +04:00
|
|
|
// JMP m16:16 — far indirect jump through a memory pointer (offset then
// selector). In protected mode the transfer is delegated to
// jump_protected(); in real/v8086 mode the offset is checked against
// CS.LIMIT and CS is reloaded directly.
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::JMP16_Ep(bxInstruction_c *i)
{
  Bit16u cs_raw;
  Bit16u op1_16;

  invalidate_prefetch_q();

  bx_address eaddr = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));

  op1_16 = read_virtual_word(i->seg(), eaddr);
  cs_raw = read_virtual_word(i->seg(), (eaddr+2) & i->asize_mask());

  // jump_protected doesn't affect RSP so it is RSP safe
  if (protected_mode()) {
    jump_protected(i, cs_raw, op1_16);
    goto done;
  }

  // CS.LIMIT can't change when in real/v8086 mode
  if (op1_16 > BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled) {
    BX_ERROR(("JMP16_Ep: instruction pointer not within code segment limits"));
    exception(BX_GP_EXCEPTION, 0);
  }

  load_seg_reg(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS], cs_raw);
  EIP = op1_16;

done:

  BX_INSTR_FAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_JMP_INDIRECT,
                      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value, EIP);

  BX_NEXT_TRACE(i);
}
|
|
|
|
|
2011-07-07 00:01:18 +04:00
|
|
|
// IRET, 16-bit operand size. Handles SVM/VMX intercepts and NMI
// unblocking, then performs the protected-mode, v8086, or real-mode
// return path. In real mode, pops IP, CS and FLAGS (in that order) and
// validates IP against CS.LIMIT before committing.
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::IRET16(bxInstruction_c *i)
{
  invalidate_prefetch_q();

#if BX_SUPPORT_SVM
  if (BX_CPU_THIS_PTR in_svm_guest) {
    if (SVM_INTERCEPT(SVM_INTERCEPT0_IRET)) Svm_Vmexit(SVM_VMEXIT_IRET);
  }
#endif

#if BX_SUPPORT_VMX
  if (BX_CPU_THIS_PTR in_vmx_guest)
    if (is_masked_event(PIN_VMEXIT(VMX_VM_EXEC_CTRL1_VIRTUAL_NMI) ? BX_EVENT_VMX_VIRTUAL_NMI : BX_EVENT_NMI))
      BX_CPU_THIS_PTR nmi_unblocking_iret = 1;

  if (BX_CPU_THIS_PTR in_vmx_guest && PIN_VMEXIT(VMX_VM_EXEC_CTRL1_NMI_EXITING)) {
    if (PIN_VMEXIT(VMX_VM_EXEC_CTRL1_VIRTUAL_NMI)) unmask_event(BX_EVENT_VMX_VIRTUAL_NMI);
  }
  else
#endif
    unmask_event(BX_EVENT_NMI);

#if BX_DEBUGGER
  BX_CPU_THIS_PTR show_flag |= Flag_iret;
#endif

  if (protected_mode()) {
    iret_protected(i);
    goto done;
  }

  RSP_SPECULATIVE;

  if (v8086_mode()) {
    // IOPL check in stack_return_from_v86()
    iret16_stack_return_from_v86(i);
  }
  else {
    Bit16u ip     = pop_16();
    Bit16u cs_raw = pop_16(); // #SS has higher priority
    Bit16u flags  = pop_16();

    // CS.LIMIT can't change when in real/v8086 mode
    if(ip > BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled) {
      BX_ERROR(("IRET16: instruction pointer not within code segment limits"));
      exception(BX_GP_EXCEPTION, 0);
    }

    load_seg_reg(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS], cs_raw);
    EIP = (Bit32u) ip;
    write_flags(flags, /* change IOPL? */ 1, /* change IF? */ 1);
  }

  RSP_COMMIT;

done:

#if BX_SUPPORT_VMX
  BX_CPU_THIS_PTR nmi_unblocking_iret = 0;
#endif

  BX_INSTR_FAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_IRET,
                      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value, EIP);

  BX_NEXT_TRACE(i);
}
|
2008-06-22 07:45:55 +04:00
|
|
|
|
2011-07-07 00:01:18 +04:00
|
|
|
// JCXZ/JECXZ rel8: jump when the count register (CX or ECX, selected by
// address size) is zero. Flags are not examined.
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::JCXZ_Jb(bxInstruction_c *i)
{
  // it is impossible to get this instruction in long mode
  BX_ASSERT(i->as64L() == 0);

  Bit32u temp_ECX;

  if (i->as32L())
    temp_ECX = ECX;
  else
    temp_ECX = CX;

  if (temp_ECX == 0) {
    Bit16u new_IP = IP + i->Iw();
    branch_near16(new_IP);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, new_IP);
    BX_LINK_TRACE(i);
  }

  BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
  BX_NEXT_TRACE(i);
}
|
|
|
|
|
|
|
|
//
// There is some weirdness in LOOP instructions definition. If an exception
// was generated during the instruction execution (for example #GP fault
// because EIP was beyond CS segment limits) CPU state should restore the
// state prior to instruction execution.
//
// The final point is that we are not allowed to decrement the ECX register
// before it is known that no exceptions can happen.
//
|
|
|
|
|
2011-07-07 00:01:18 +04:00
|
|
|
// LOOPNE rel8, 16-bit operand size: decrement the count register
// (CX or ECX per address size) and jump when the result is non-zero
// and ZF=0. The count register is only written back after the branch
// target has been validated (see the LOOP weirdness comment above the
// LOOPNE16_Jb family).
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::LOOPNE16_Jb(bxInstruction_c *i)
{
  // it is impossible to get this instruction in long mode
  BX_ASSERT(i->as64L() == 0);

  if (i->as32L()) {
    Bit32u count = ECX;

    count--;
    if (count != 0 && (get_ZF()==0)) {
      Bit16u new_IP = IP + i->Iw();
      branch_near16(new_IP);
      BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, new_IP);
    }
#if BX_INSTRUMENTATION
    else {
      BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
    }
#endif

    ECX = count;
  }
  else {
    Bit16u count = CX;

    count--;
    if (count != 0 && (get_ZF()==0)) {
      Bit16u new_IP = IP + i->Iw();
      branch_near16(new_IP);
      BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, new_IP);
    }
#if BX_INSTRUMENTATION
    else {
      BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
    }
#endif

    CX = count;
  }

  BX_NEXT_TRACE(i);
}
|
|
|
|
|
2011-07-07 00:01:18 +04:00
|
|
|
// LOOPE rel8, 16-bit operand size: decrement the count register
// (CX or ECX per address size) and jump when the result is non-zero
// and ZF=1. The count register write-back is deferred until after the
// branch target has been validated.
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::LOOPE16_Jb(bxInstruction_c *i)
{
  // it is impossible to get this instruction in long mode
  BX_ASSERT(i->as64L() == 0);

  if (i->as32L()) {
    Bit32u count = ECX;

    count--;
    if (count != 0 && get_ZF()) {
      Bit16u new_IP = IP + i->Iw();
      branch_near16(new_IP);
      BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, new_IP);
    }
#if BX_INSTRUMENTATION
    else {
      BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
    }
#endif

    ECX = count;
  }
  else {
    Bit16u count = CX;

    count--;
    if (count != 0 && get_ZF()) {
      Bit16u new_IP = IP + i->Iw();
      branch_near16(new_IP);
      BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, new_IP);
    }
#if BX_INSTRUMENTATION
    else {
      BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
    }
#endif

    CX = count;
  }

  BX_NEXT_TRACE(i);
}
|
|
|
|
|
2011-07-07 00:01:18 +04:00
|
|
|
// LOOP rel8, 16-bit operand size: decrement the count register
// (CX or ECX per address size) and jump when the result is non-zero.
// Flags are not examined; write-back of the count register is deferred
// until after the branch target has been validated.
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::LOOP16_Jb(bxInstruction_c *i)
{
  // it is impossible to get this instruction in long mode
  BX_ASSERT(i->as64L() == 0);

  if (i->as32L()) {
    Bit32u count = ECX;

    count--;
    if (count != 0) {
      Bit16u new_IP = IP + i->Iw();
      branch_near16(new_IP);
      BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, new_IP);
    }
#if BX_INSTRUMENTATION
    else {
      BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
    }
#endif

    ECX = count;
  }
  else {
    Bit16u count = CX;

    count--;
    if (count != 0) {
      Bit16u new_IP = IP + i->Iw();
      branch_near16(new_IP);
      BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, new_IP);
    }
#if BX_INSTRUMENTATION
    else {
      BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
    }
#endif

    CX = count;
  }

  BX_NEXT_TRACE(i);
}
|