/////////////////////////////////////////////////////////////////////////
// $Id$
/////////////////////////////////////////////////////////////////////////
//
// Copyright (C) 2001-2012 The Bochs Project
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 2 of the License, or (at your option) any later version.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
/////////////////////////////////////////////////////////////////////////

#define NEED_CPU_REG_SHORTCUTS 1
#include "bochs.h"
#include "cpu.h"
#define LOG_THIS BX_CPU_THIS_PTR

#if BX_SUPPORT_X86_64

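// Helper for taken near branches: add the sign-extended 32-bit displacement
// to RIP, raise #GP(0) if the target is not canonical, then commit the new RIP.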
BX_CPP_INLINE void BX_CPP_AttrRegparmN(1) BX_CPU_C::branch_near64(bxInstruction_c *i)
{
  Bit64u new_RIP = RIP + (Bit32s) i->Id();

  if (! IsCanonical(new_RIP)) {
    BX_ERROR(("branch_near64: canonical RIP violation"));
    exception(BX_GP_EXCEPTION, 0);
  }

  RIP = new_RIP;

#if BX_SUPPORT_HANDLERS_CHAINING_SPEEDUPS == 0
  // assert magic async_event to stop trace execution
  BX_CPU_THIS_PTR async_event |= BX_ASYNC_EVENT_STOP_TRACE;
#endif
}

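// RET (near) with immediate: pop the 64-bit return RIP, then release the
// additional immediate number of bytes from the stack.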
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::RETnear64_Iw(bxInstruction_c *i)
{
#if BX_DEBUGGER
  BX_CPU_THIS_PTR show_flag |= Flag_ret;
#endif

  Bit64u return_RIP = stack_read_qword(RSP);

  if (! IsCanonical(return_RIP)) {
    BX_ERROR(("%s: canonical RIP violation", i->getIaOpcodeName()));
    exception(BX_GP_EXCEPTION, 0);
  }

  RIP = return_RIP;
  RSP += 8 + i->Iw();

  BX_INSTR_UCNEAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_RET, PREV_RIP, RIP);

  BX_NEXT_TRACE(i);
}

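// RET (near) without immediate: pop the 64-bit return RIP from the stack.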
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::RETnear64(bxInstruction_c *i)
{
#if BX_DEBUGGER
  BX_CPU_THIS_PTR show_flag |= Flag_ret;
#endif

  Bit64u return_RIP = stack_read_qword(RSP);

  if (! IsCanonical(return_RIP)) {
    BX_ERROR(("%s: canonical RIP violation", i->getIaOpcodeName()));
    exception(BX_GP_EXCEPTION, 0);
  }

  RIP = return_RIP;
  RSP += 8;

  BX_INSTR_UCNEAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_RET, PREV_RIP, RIP);

  BX_NEXT_TRACE(i);
}

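// RET (far) with immediate: far returns in long mode always go through
// return_protected(), which also applies the immediate stack adjustment.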
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::RETfar64_Iw(bxInstruction_c *i)
{
  invalidate_prefetch_q();

#if BX_DEBUGGER
  BX_CPU_THIS_PTR show_flag |= Flag_ret;
#endif

  BX_ASSERT(protected_mode());

  // return_protected is RSP safe
  return_protected(i, i->Iw());

  BX_INSTR_FAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_RET,
                      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value, RIP);

  BX_NEXT_TRACE(i);
}

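// CALL (near, relative): push the RIP of the next instruction and branch to
// RIP + displacement; RSP is only decremented after no fault can occur.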
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::CALL_Jq(bxInstruction_c *i)
{
  Bit64u new_RIP = RIP + (Bit32s) i->Id();

#if BX_DEBUGGER
  BX_CPU_THIS_PTR show_flag |= Flag_call;
#endif

  /* push 64 bit EA of next instruction */
  stack_write_qword(RSP-8, RIP);

  if (! IsCanonical(new_RIP)) {
    BX_ERROR(("%s: canonical RIP violation", i->getIaOpcodeName()));
    exception(BX_GP_EXCEPTION, 0);
  }

  RIP = new_RIP;
  RSP -= 8;

  BX_INSTR_UCNEAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_CALL, PREV_RIP, RIP);

  BX_LINK_TRACE(i);
}

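// CALL (near, indirect): push the return RIP and branch to the 64-bit value
// taken from the register operand.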
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::CALL_EqR(bxInstruction_c *i)
{
#if BX_DEBUGGER
  BX_CPU_THIS_PTR show_flag |= Flag_call;
#endif

  Bit64u new_RIP = BX_READ_64BIT_REG(i->dst());

  /* push 64 bit EA of next instruction */
  stack_write_qword(RSP-8, RIP);

  if (! IsCanonical(new_RIP))
  {
    BX_ERROR(("%s: canonical RIP violation", i->getIaOpcodeName()));
    exception(BX_GP_EXCEPTION, 0);
  }

  RIP = new_RIP;
  RSP -= 8;

  BX_INSTR_UCNEAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_CALL_INDIRECT, PREV_RIP, RIP);

  BX_NEXT_TRACE(i);
}

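// CALL (far, indirect): fetch the 16-bit selector : 64-bit offset pair from
// memory and let call_protected() perform the far control transfer.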
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::CALL64_Ep(bxInstruction_c *i)
{
  invalidate_prefetch_q();

#if BX_DEBUGGER
  BX_CPU_THIS_PTR show_flag |= Flag_call;
#endif

  bx_address eaddr = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));

  /* pointer, segment address pair */
  Bit64u op1_64 = read_virtual_qword_64(i->seg(), eaddr);
  Bit16u cs_raw = read_virtual_word_64(i->seg(), (eaddr+8) & i->asize_mask());

  BX_ASSERT(protected_mode());

  // call_protected is RSP safe for 64-bit mode
  call_protected(i, cs_raw, op1_64);

  BX_INSTR_FAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_CALL_INDIRECT,
                      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value, RIP);

  BX_NEXT_TRACE(i);
}

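// JMP (near, relative): branch to RIP + sign-extended 32-bit displacement.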
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::JMP_Jq(bxInstruction_c *i)
{
  Bit64u new_RIP = RIP + (Bit32s) i->Id();

  if (! IsCanonical(new_RIP)) {
    BX_ERROR(("%s: canonical RIP violation", i->getIaOpcodeName()));
    exception(BX_GP_EXCEPTION, 0);
  }

  RIP = new_RIP;

  BX_INSTR_UCNEAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_JMP, PREV_RIP, RIP);

  BX_LINK_TRACE(i);
}

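// Conditional near jumps (Jcc with 64-bit operand size): when the condition
// holds, the branch is taken through branch_near64() and the trace may be
// linked; otherwise execution simply continues with the next instruction.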
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::JO_Jq(bxInstruction_c *i)
{
  if (get_OF()) {
    branch_near64(i);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, RIP);
    BX_LINK_TRACE(i);
  }

  BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
  BX_NEXT_INSTR(i); // trace can continue over non-taken branch
}

BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::JNO_Jq(bxInstruction_c *i)
{
  if (! get_OF()) {
    branch_near64(i);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, RIP);
    BX_LINK_TRACE(i);
  }

  BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
  BX_NEXT_INSTR(i); // trace can continue over non-taken branch
}

BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::JB_Jq(bxInstruction_c *i)
{
  if (get_CF()) {
    branch_near64(i);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, RIP);
    BX_LINK_TRACE(i);
  }

  BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
  BX_NEXT_INSTR(i); // trace can continue over non-taken branch
}

BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::JNB_Jq(bxInstruction_c *i)
{
  if (! get_CF()) {
    branch_near64(i);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, RIP);
    BX_LINK_TRACE(i);
  }

  BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
  BX_NEXT_INSTR(i); // trace can continue over non-taken branch
}

BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::JZ_Jq(bxInstruction_c *i)
{
  if (get_ZF()) {
    branch_near64(i);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, RIP);
    BX_LINK_TRACE(i);
  }

  BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
  BX_NEXT_INSTR(i); // trace can continue over non-taken branch
}

BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::JNZ_Jq(bxInstruction_c *i)
{
  if (! get_ZF()) {
    branch_near64(i);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, RIP);
    BX_LINK_TRACE(i);
  }

  BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
  BX_NEXT_INSTR(i); // trace can continue over non-taken branch
}

BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::JBE_Jq(bxInstruction_c *i)
{
  if (get_CF() || get_ZF()) {
    branch_near64(i);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, RIP);
    BX_LINK_TRACE(i);
  }

  BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
  BX_NEXT_INSTR(i); // trace can continue over non-taken branch
}

BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::JNBE_Jq(bxInstruction_c *i)
{
  if (! (get_CF() || get_ZF())) {
    branch_near64(i);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, RIP);
    BX_LINK_TRACE(i);
  }

  BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
  BX_NEXT_INSTR(i); // trace can continue over non-taken branch
}

BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::JS_Jq(bxInstruction_c *i)
{
  if (get_SF()) {
    branch_near64(i);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, RIP);
    BX_LINK_TRACE(i);
  }

  BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
  BX_NEXT_INSTR(i); // trace can continue over non-taken branch
}

BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::JNS_Jq(bxInstruction_c *i)
{
  if (! get_SF()) {
    branch_near64(i);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, RIP);
    BX_LINK_TRACE(i);
  }

  BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
  BX_NEXT_INSTR(i); // trace can continue over non-taken branch
}

BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::JP_Jq(bxInstruction_c *i)
{
  if (get_PF()) {
    branch_near64(i);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, RIP);
    BX_LINK_TRACE(i);
  }

  BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
  BX_NEXT_INSTR(i); // trace can continue over non-taken branch
}

BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::JNP_Jq(bxInstruction_c *i)
{
  if (! get_PF()) {
    branch_near64(i);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, RIP);
    BX_LINK_TRACE(i);
  }

  BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
  BX_NEXT_INSTR(i); // trace can continue over non-taken branch
}

BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::JL_Jq(bxInstruction_c *i)
{
  if (getB_SF() != getB_OF()) {
    branch_near64(i);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, RIP);
    BX_LINK_TRACE(i);
  }

  BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
  BX_NEXT_INSTR(i); // trace can continue over non-taken branch
}

BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::JNL_Jq(bxInstruction_c *i)
{
  if (getB_SF() == getB_OF()) {
    branch_near64(i);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, RIP);
    BX_LINK_TRACE(i);
  }

  BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
  BX_NEXT_INSTR(i); // trace can continue over non-taken branch
}

BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::JLE_Jq(bxInstruction_c *i)
{
  if (get_ZF() || (getB_SF() != getB_OF())) {
    branch_near64(i);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, RIP);
    BX_LINK_TRACE(i);
  }

  BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
  BX_NEXT_INSTR(i); // trace can continue over non-taken branch
}

BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::JNLE_Jq(bxInstruction_c *i)
{
  if (! get_ZF() && (getB_SF() == getB_OF())) {
    branch_near64(i);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, RIP);
    BX_LINK_TRACE(i);
  }

  BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
  BX_NEXT_INSTR(i); // trace can continue over non-taken branch
}

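// JMP (near, indirect): branch to the 64-bit value taken from the register
// operand, after checking that it is canonical.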
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::JMP_EqR(bxInstruction_c *i)
{
  Bit64u op1_64 = BX_READ_64BIT_REG(i->dst());

  if (! IsCanonical(op1_64)) {
    BX_ERROR(("%s: canonical RIP violation", i->getIaOpcodeName()));
    exception(BX_GP_EXCEPTION, 0);
  }

  RIP = op1_64;

  BX_INSTR_UCNEAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_JMP_INDIRECT, PREV_RIP, RIP);

  BX_NEXT_TRACE(i);
}

/* Far indirect jump */
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::JMP64_Ep(bxInstruction_c *i)
{
  invalidate_prefetch_q();

  bx_address eaddr = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));

  Bit64u op1_64 = read_virtual_qword_64(i->seg(), eaddr);
  Bit16u cs_raw = read_virtual_word_64(i->seg(), (eaddr+8) & i->asize_mask());

  BX_ASSERT(protected_mode());

  jump_protected(i, cs_raw, op1_64);

  BX_INSTR_FAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_JMP_INDIRECT,
                      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value, RIP);

  BX_NEXT_TRACE(i);
}

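// IRET in long mode: handle SVM intercepts and NMI unblocking (including VMX
// virtual-NMI bookkeeping), then perform the actual return via long_iret().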
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::IRET64(bxInstruction_c *i)
{
  invalidate_prefetch_q();

#if BX_SUPPORT_SVM
  if (BX_CPU_THIS_PTR in_svm_guest) {
    if (SVM_INTERCEPT(SVM_INTERCEPT0_IRET)) Svm_Vmexit(SVM_VMEXIT_IRET);
  }
#endif

#if BX_SUPPORT_VMX
  if (BX_CPU_THIS_PTR in_vmx_guest)
    if (is_masked_event(PIN_VMEXIT(VMX_VM_EXEC_CTRL1_VIRTUAL_NMI) ? BX_EVENT_VMX_VIRTUAL_NMI : BX_EVENT_NMI))
      BX_CPU_THIS_PTR nmi_unblocking_iret = 1;

  if (BX_CPU_THIS_PTR in_vmx_guest && PIN_VMEXIT(VMX_VM_EXEC_CTRL1_NMI_EXITING)) {
    if (PIN_VMEXIT(VMX_VM_EXEC_CTRL1_VIRTUAL_NMI)) unmask_event(BX_EVENT_VMX_VIRTUAL_NMI);
  }
  else
#endif
    unmask_event(BX_EVENT_NMI);

#if BX_DEBUGGER
  BX_CPU_THIS_PTR show_flag |= Flag_iret;
#endif

  BX_ASSERT(long_mode());

  long_iret(i);

#if BX_SUPPORT_VMX
  BX_CPU_THIS_PTR nmi_unblocking_iret = 0;
#endif

  BX_INSTR_FAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_IRET,
                      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value, RIP);

  BX_NEXT_TRACE(i);
}

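// JRCXZ/JECXZ: branch if RCX (or ECX, with a 32-bit address size) is zero.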
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::JRCXZ_Jb(bxInstruction_c *i)
{
  Bit64u temp_RCX;

  if (i->as64L())
    temp_RCX = RCX;
  else
    temp_RCX = ECX;

  if (temp_RCX == 0) {
    branch_near64(i);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, RIP);
    BX_LINK_TRACE(i);
  }

  BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
  BX_NEXT_TRACE(i);
}

//
// There is some weirdness in the LOOP instructions' definition. If an exception
// is generated during instruction execution (for example, a #GP fault because
// EIP was beyond the CS segment limits), the CPU state should be restored to
// the state prior to instruction execution.
//
// The bottom line is that we are not allowed to decrement the RCX register
// before it is known that no exceptions can happen.
//

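// LOOPNE: branch while the decremented count is non-zero and ZF is clear;
// the count register is written back only after the branch can no longer fault.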
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::LOOPNE64_Jb(bxInstruction_c *i)
{
  if (i->as64L()) {
    Bit64u count = RCX;

    if (((--count) != 0) && (get_ZF()==0)) {
      branch_near64(i);
      BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, RIP);
    }
#if BX_INSTRUMENTATION
    else {
      BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
    }
#endif

    RCX = count;
  }
  else {
    Bit32u count = ECX;

    if (((--count) != 0) && (get_ZF()==0)) {
      branch_near64(i);
      BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, RIP);
    }
#if BX_INSTRUMENTATION
    else {
      BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
    }
#endif

    RCX = count;
  }

  BX_NEXT_TRACE(i);
}

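// LOOPE: branch while the decremented count is non-zero and ZF is set.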
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::LOOPE64_Jb(bxInstruction_c *i)
{
  if (i->as64L()) {
    Bit64u count = RCX;

    if (((--count) != 0) && get_ZF()) {
      branch_near64(i);
      BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, RIP);
    }
#if BX_INSTRUMENTATION
    else {
      BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
    }
#endif

    RCX = count;
  }
  else {
    Bit32u count = ECX;

    if (((--count) != 0) && get_ZF()) {
      branch_near64(i);
      BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, RIP);
    }
#if BX_INSTRUMENTATION
    else {
      BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
    }
#endif

    RCX = count;
  }

  BX_NEXT_TRACE(i);
}

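// LOOP: branch while the decremented count is non-zero.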
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::LOOP64_Jb(bxInstruction_c *i)
{
  if (i->as64L()) {
    Bit64u count = RCX;

    if ((--count) != 0) {
      branch_near64(i);
      BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, RIP);
    }
#if BX_INSTRUMENTATION
    else {
      BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
    }
#endif

    RCX = count;
  }
  else {
    Bit32u count = ECX;

    if ((--count) != 0) {
      branch_near64(i);
      BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, PREV_RIP, RIP);
    }
#if BX_INSTRUMENTATION
    else {
      BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID, PREV_RIP);
    }
#endif

    RCX = count;
  }

  BX_NEXT_TRACE(i);
}

#endif /* if BX_SUPPORT_X86_64 */