Bochs/bochs/cpu/ctrl_xfer64.cc

589 lines
14 KiB
C++
Raw Normal View History

/////////////////////////////////////////////////////////////////////////
// $Id$
/////////////////////////////////////////////////////////////////////////
//
// Copyright (C) 2001-2011 The Bochs Project
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 2 of the License, or (at your option) any later version.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
/////////////////////////////////////////////////////////////////////////
#define NEED_CPU_REG_SHORTCUTS 1
#include "bochs.h"
#include "cpu.h"
#define LOG_THIS BX_CPU_THIS_PTR
- apply patch.ifdef-disabled-options. Comments from that patch are below: For a whole lot of configure options, I put #if...#endif around code that is specific to the option, even in files which are normally only compiled when the option is on. This allows me to create a MS Visual C++ 6.0 workspace that supports many of these options. The workspace will basically compile every file all the time, but the code for disabled options will be commented out by the #if...#endif. This may one day lead to simplification of the Makefiles and configure scripts, but for the moment I'm leaving Makefiles and configure scripts alone. Affected options: BX_SUPPORT_APIC (cpu/apic.cc) BX_SUPPORT_X86_64 (cpu/*64.cc) BX_DEBUGGER (debug/*) BX_DISASM (disasm/*) BX_WITH_nameofgui (gui/*) BX_SUPPORT_CDROM (iodev/cdrom.cc) BX_NE2K_SUPPORT (iodev/eth*.cc, iodev/ne2k.cc) BX_SUPPORT_APIC (iodev/ioapic.cc) BX_IODEBUG_SUPPORT (iodev/iodebug.cc) BX_PCI_SUPPORT (iodev/pci*.cc) BX_SUPPORT_SB16 (iodev/sb*.cc) Modified Files: cpu/apic.cc cpu/arith64.cc cpu/ctrl_xfer64.cc cpu/data_xfer64.cc cpu/fetchdecode64.cc cpu/logical64.cc cpu/mult64.cc cpu/resolve64.cc cpu/shift64.cc cpu/stack64.cc debug/Makefile.in debug/crc.cc debug/dbg_main.cc debug/lexer.l debug/linux.cc debug/parser.c debug/parser.y disasm/dis_decode.cc disasm/dis_groups.cc gui/amigaos.cc gui/beos.cc gui/carbon.cc gui/macintosh.cc gui/rfb.cc gui/sdl.cc gui/term.cc gui/win32.cc gui/wx.cc gui/wxdialog.cc gui/wxmain.cc gui/x.cc iodev/cdrom.cc iodev/eth.cc iodev/eth_arpback.cc iodev/eth_fbsd.cc iodev/eth_linux.cc iodev/eth_null.cc iodev/eth_packetmaker.cc iodev/eth_tap.cc iodev/eth_tuntap.cc iodev/eth_win32.cc iodev/ioapic.cc iodev/iodebug.cc iodev/ne2k.cc iodev/pci.cc iodev/pci2isa.cc iodev/sb16.cc iodev/soundlnx.cc iodev/soundwin.cc
2002-11-19 08:47:45 +03:00
#if BX_SUPPORT_X86_64
// Common helper for taken near branches in 64-bit mode: adds the
// sign-extended 32-bit displacement from the instruction to RIP,
// raising #GP(0) if the target is non-canonical.
BX_CPP_INLINE void BX_CPP_AttrRegparmN(1) BX_CPU_C::branch_near64(bxInstruction_c *i)
{
  // Displacement is sign-extended from 32 to 64 bits by the cast chain.
  Bit64u new_RIP = RIP + (Bit32s) i->Id();

  // RIP is only updated after the canonical check succeeds.
  if (! IsCanonical(new_RIP)) {
    BX_ERROR(("branch_near64: canonical RIP violation"));
    exception(BX_GP_EXCEPTION, 0);
  }

  RIP = new_RIP;

#if BX_SUPPORT_HANDLERS_CHAINING_SPEEDUPS == 0
  // assert magic async_event to stop trace execution
  BX_CPU_THIS_PTR async_event |= BX_ASYNC_EVENT_STOP_TRACE;
#endif
}
// RET imm16 in 64-bit mode: pop the 64-bit return address, then release
// an additional imm16 bytes of stack (callee-cleans-arguments form).
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::RETnear64_Iw(bxInstruction_c *i)
{
#if BX_DEBUGGER
  BX_CPU_THIS_PTR show_flag |= Flag_ret;
#endif

  // Read the return address first; RSP is only adjusted after the
  // canonical check so a #GP leaves the stack pointer unchanged.
  Bit64u return_RIP = read_virtual_qword_64(BX_SEG_REG_SS, RSP);

  if (! IsCanonical(return_RIP)) {
    BX_ERROR(("RETnear64_Iw: canonical RIP violation"));
    exception(BX_GP_EXCEPTION, 0);
  }

  RIP = return_RIP;
  RSP += 8 + i->Iw(); // pop return address plus imm16 extra bytes

  BX_INSTR_UCNEAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_RET, RIP);

  BX_NEXT_TRACE(i);
}
// RET in 64-bit mode: pop the 64-bit return address from SS:RSP.
// (Fix: removed a stray commit-log timestamp line that was interleaved
// with the code and made the function body invalid C++.)
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::RETnear64(bxInstruction_c *i)
{
#if BX_DEBUGGER
  BX_CPU_THIS_PTR show_flag |= Flag_ret;
#endif

  // Read the return address first; RSP is only adjusted after the
  // canonical check so a #GP leaves the stack pointer unchanged.
  Bit64u return_RIP = read_virtual_qword_64(BX_SEG_REG_SS, RSP);

  if (! IsCanonical(return_RIP)) {
    BX_ERROR(("RETnear64: canonical RIP violation %08x%08x", GET32H(return_RIP), GET32L(return_RIP)));
    exception(BX_GP_EXCEPTION, 0);
  }

  RIP = return_RIP;
  RSP += 8;

  BX_INSTR_UCNEAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_RET, RIP);

  BX_NEXT_TRACE(i);
}
// Far RET (with imm16) in 64-bit mode: delegated to return_protected(),
// since long mode is always protected mode.
// (Fix: removed a stray commit-log timestamp line embedded in the body.)
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::RETfar64_Iw(bxInstruction_c *i)
{
  invalidate_prefetch_q();

#if BX_DEBUGGER
  BX_CPU_THIS_PTR show_flag |= Flag_ret;
#endif

  BX_ASSERT(protected_mode());

  // return_protected is RSP safe
  return_protected(i, i->Iw());

  BX_INSTR_FAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_RET,
                      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value, RIP);

  BX_NEXT_TRACE(i);
}
// CALL rel32 in 64-bit mode: push the return address, then branch to
// RIP + sign-extended displacement.
// (Fix: removed stray commit-log timestamp lines embedded in the body.)
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::CALL_Jq(bxInstruction_c *i)
{
  Bit64u new_RIP = RIP + (Bit32s) i->Id();

#if BX_DEBUGGER
  BX_CPU_THIS_PTR show_flag |= Flag_call;
#endif

  /* push 64 bit EA of next instruction */
  write_virtual_qword_64(BX_SEG_REG_SS, RSP-8, RIP);

  if (! IsCanonical(new_RIP)) {
    BX_ERROR(("CALL_Jq: canonical RIP violation"));
    exception(BX_GP_EXCEPTION, 0);
  }

  RIP = new_RIP;
  // RSP is committed only after the stack write and the canonical check
  // succeeded, so a fault leaves RSP unchanged.
  RSP -= 8;

  BX_INSTR_UCNEAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_CALL, RIP);

  BX_NEXT_TRACE(i);
}
// CALL r64 (register-indirect near call) in 64-bit mode.
// (Fix: removed stray commit-log timestamp lines embedded in the body.)
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::CALL_EqR(bxInstruction_c *i)
{
#if BX_DEBUGGER
  BX_CPU_THIS_PTR show_flag |= Flag_call;
#endif

  Bit64u new_RIP = BX_READ_64BIT_REG(i->rm());

  /* push 64 bit EA of next instruction */
  write_virtual_qword_64(BX_SEG_REG_SS, RSP-8, RIP);

  if (! IsCanonical(new_RIP))
  {
    BX_ERROR(("CALL_Eq: canonical RIP violation"));
    exception(BX_GP_EXCEPTION, 0);
  }

  RIP = new_RIP;
  // RSP is committed only after the stack write and the canonical check
  // succeeded, so a fault leaves RSP unchanged.
  RSP -= 8;

  BX_INSTR_UCNEAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_CALL, RIP);

  BX_NEXT_TRACE(i);
}
// Far CALL through a memory pointer in 64-bit mode: reads a 64-bit offset
// and a 16-bit selector from memory, then performs a protected-mode call.
// (Fix: removed stray commit-log timestamp lines embedded in the body.)
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::CALL64_Ep(bxInstruction_c *i)
{
  invalidate_prefetch_q();

#if BX_DEBUGGER
  BX_CPU_THIS_PTR show_flag |= Flag_call;
#endif

  bx_address eaddr = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));

  /* pointer, segment address pair */
  Bit64u op1_64 = read_virtual_qword_64(i->seg(), eaddr);
  Bit16u cs_raw = read_virtual_word_64(i->seg(), (eaddr+8) & i->asize_mask());

  BX_ASSERT(protected_mode());

  // call_protected is RSP safe for 64-bit mode
  call_protected(i, cs_raw, op1_64);

  BX_INSTR_FAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_CALL,
                      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value, RIP);

  BX_NEXT_TRACE(i);
}
// JMP rel32 in 64-bit mode: unconditional near jump to RIP plus the
// sign-extended 32-bit displacement.
// (Fix: removed stray commit-log timestamp lines embedded in the body.)
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::JMP_Jq(bxInstruction_c *i)
{
  Bit64u new_RIP = RIP + (Bit32s) i->Id();

  if (! IsCanonical(new_RIP)) {
    BX_ERROR(("JMP_Jq: canonical RIP violation"));
    exception(BX_GP_EXCEPTION, 0);
  }

  RIP = new_RIP;

  BX_INSTR_UCNEAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_JMP, RIP);

  BX_NEXT_TRACE(i);
}
// JO rel32: jump if overflow (OF=1).
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::JO_Jq(bxInstruction_c *i)
{
  if (get_OF()) {
    branch_near64(i);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, RIP);
    BX_NEXT_TRACE(i);
  }

  BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
  BX_NEXT_INSTR(i); // trace can continue over non-taken branch
}
// JNO rel32: jump if not overflow (OF=0).
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::JNO_Jq(bxInstruction_c *i)
{
  if (! get_OF()) {
    branch_near64(i);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, RIP);
    BX_NEXT_TRACE(i);
  }

  BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
  BX_NEXT_INSTR(i); // trace can continue over non-taken branch
}
// JB rel32: jump if below / carry (CF=1).
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::JB_Jq(bxInstruction_c *i)
{
  if (get_CF()) {
    branch_near64(i);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, RIP);
    BX_NEXT_TRACE(i);
  }

  BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
  BX_NEXT_INSTR(i); // trace can continue over non-taken branch
}
// JNB rel32: jump if not below / no carry (CF=0).
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::JNB_Jq(bxInstruction_c *i)
{
  if (! get_CF()) {
    branch_near64(i);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, RIP);
    BX_NEXT_TRACE(i);
  }

  BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
  BX_NEXT_INSTR(i); // trace can continue over non-taken branch
}
// JZ rel32: jump if zero (ZF=1).
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::JZ_Jq(bxInstruction_c *i)
{
  if (get_ZF()) {
    branch_near64(i);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, RIP);
    BX_NEXT_TRACE(i);
  }

  BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
  BX_NEXT_INSTR(i); // trace can continue over non-taken branch
}
// JNZ rel32: jump if not zero (ZF=0).
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::JNZ_Jq(bxInstruction_c *i)
{
  if (! get_ZF()) {
    branch_near64(i);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, RIP);
    BX_NEXT_TRACE(i);
  }

  BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
  BX_NEXT_INSTR(i); // trace can continue over non-taken branch
}
// JBE rel32: jump if below or equal (CF=1 or ZF=1).
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::JBE_Jq(bxInstruction_c *i)
{
  if (get_CF() || get_ZF()) {
    branch_near64(i);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, RIP);
    BX_NEXT_TRACE(i);
  }

  BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
  BX_NEXT_INSTR(i); // trace can continue over non-taken branch
}
// JNBE rel32: jump if not below or equal, i.e. above (CF=0 and ZF=0).
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::JNBE_Jq(bxInstruction_c *i)
{
  if (! (get_CF() || get_ZF())) {
    branch_near64(i);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, RIP);
    BX_NEXT_TRACE(i);
  }

  BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
  BX_NEXT_INSTR(i); // trace can continue over non-taken branch
}
// JS rel32: jump if sign (SF=1).
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::JS_Jq(bxInstruction_c *i)
{
  if (get_SF()) {
    branch_near64(i);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, RIP);
    BX_NEXT_TRACE(i);
  }

  BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
  BX_NEXT_INSTR(i); // trace can continue over non-taken branch
}
// JNS rel32: jump if not sign (SF=0).
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::JNS_Jq(bxInstruction_c *i)
{
  if (! get_SF()) {
    branch_near64(i);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, RIP);
    BX_NEXT_TRACE(i);
  }

  BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
  BX_NEXT_INSTR(i); // trace can continue over non-taken branch
}
// JP rel32: jump if parity (PF=1).
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::JP_Jq(bxInstruction_c *i)
{
  if (get_PF()) {
    branch_near64(i);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, RIP);
    BX_NEXT_TRACE(i);
  }

  BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
  BX_NEXT_INSTR(i); // trace can continue over non-taken branch
}
// JNP rel32: jump if not parity (PF=0).
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::JNP_Jq(bxInstruction_c *i)
{
  if (! get_PF()) {
    branch_near64(i);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, RIP);
    BX_NEXT_TRACE(i);
  }

  BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
  BX_NEXT_INSTR(i); // trace can continue over non-taken branch
}
// JL rel32: jump if less, signed (SF != OF).
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::JL_Jq(bxInstruction_c *i)
{
  if (getB_SF() != getB_OF()) {
    branch_near64(i);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, RIP);
    BX_NEXT_TRACE(i);
  }

  BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
  BX_NEXT_INSTR(i); // trace can continue over non-taken branch
}
// JNL rel32: jump if not less, signed (SF == OF).
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::JNL_Jq(bxInstruction_c *i)
{
  if (getB_SF() == getB_OF()) {
    branch_near64(i);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, RIP);
    BX_NEXT_TRACE(i);
  }

  BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
  BX_NEXT_INSTR(i); // trace can continue over non-taken branch
}
// JLE rel32: jump if less or equal, signed (ZF=1 or SF != OF).
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::JLE_Jq(bxInstruction_c *i)
{
  if (get_ZF() || (getB_SF() != getB_OF())) {
    branch_near64(i);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, RIP);
    BX_NEXT_TRACE(i);
  }

  BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
  BX_NEXT_INSTR(i); // trace can continue over non-taken branch
}
// JNLE rel32: jump if not less or equal, i.e. greater (ZF=0 and SF == OF).
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::JNLE_Jq(bxInstruction_c *i)
{
  if (! get_ZF() && (getB_SF() == getB_OF())) {
    branch_near64(i);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, RIP);
    BX_NEXT_TRACE(i);
  }

  BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
  BX_NEXT_INSTR(i); // trace can continue over non-taken branch
}
// JMP r64 (register-indirect near jump) in 64-bit mode.
// (Fix: removed a stray commit-log timestamp line embedded in the body.)
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::JMP_EqR(bxInstruction_c *i)
{
  Bit64u op1_64 = BX_READ_64BIT_REG(i->rm());

  // Target must be canonical; otherwise #GP(0) without changing RIP.
  if (! IsCanonical(op1_64)) {
    BX_ERROR(("JMP_Eq: canonical RIP violation"));
    exception(BX_GP_EXCEPTION, 0);
  }

  RIP = op1_64;

  BX_INSTR_UCNEAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_JMP, RIP);

  BX_NEXT_TRACE(i);
}
/* Far indirect jump */
// Far indirect JMP through a memory pointer in 64-bit mode: reads a 64-bit
// offset and a 16-bit selector, then performs a protected-mode jump.
// (Fix: removed stray commit-log timestamp lines embedded in the body.)
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::JMP64_Ep(bxInstruction_c *i)
{
  invalidate_prefetch_q();

  bx_address eaddr = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));

  Bit64u op1_64 = read_virtual_qword_64(i->seg(), eaddr);
  Bit16u cs_raw = read_virtual_word_64(i->seg(), (eaddr+8) & i->asize_mask());

  BX_ASSERT(protected_mode());

  jump_protected(i, cs_raw, op1_64);

  BX_INSTR_FAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_JMP,
                      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value, RIP);

  BX_NEXT_TRACE(i);
}
// IRETQ in 64-bit (long) mode: delegated to long_iret().
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::IRET64(bxInstruction_c *i)
{
  invalidate_prefetch_q();

#if BX_SUPPORT_VMX
  // IRET re-enables NMI delivery — except in a VMX guest whose
  // NMI-exiting execution control is set, where NMI blocking is
  // left untouched.
  if (!BX_CPU_THIS_PTR in_vmx_guest || !VMEXIT(VMX_VM_EXEC_CTRL1_NMI_VMEXIT))
#endif
    BX_CPU_THIS_PTR disable_NMI = 0;

#if BX_DEBUGGER
  BX_CPU_THIS_PTR show_flag |= Flag_iret;
#endif

  BX_ASSERT(long_mode());

  long_iret(i);

  BX_INSTR_FAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_IRET,
                      BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value, RIP);

  BX_NEXT_TRACE(i);
}
// JRCXZ/JECXZ: branch if the count register is zero. The address-size
// attribute of the instruction selects RCX (64-bit) or ECX (32-bit).
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::JRCXZ_Jb(bxInstruction_c *i)
{
  Bit64u temp_RCX;

  if (i->as64L())
    temp_RCX = RCX;
  else
    temp_RCX = ECX;

  if (temp_RCX == 0) {
    branch_near64(i);
    BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, RIP);
  }
#if BX_INSTRUMENTATION
  else {
    BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
  }
#endif

  BX_NEXT_TRACE(i);
}
//
// There is some subtlety in the definition of the LOOP instructions. If an
// exception is generated during instruction execution (for example a #GP
// fault because EIP was beyond the CS segment limits), the CPU state must
// be restored to the state prior to instruction execution.
//
// The key point is that we are not allowed to decrement the RCX register
// before it is known that no exceptions can happen.
//
// LOOPNE: decrement the count register (RCX or ECX, selected by address
// size) and branch while it is non-zero and ZF=0. The decremented count
// is written back only after branch_near64() can no longer fault, so an
// exception restores the pre-instruction state (see comment above).
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::LOOPNE64_Jb(bxInstruction_c *i)
{
  if (i->as64L()) {
    Bit64u count = RCX;

    if (((--count) != 0) && (get_ZF()==0)) {
      branch_near64(i);
      BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, RIP);
    }
#if BX_INSTRUMENTATION
    else {
      BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
    }
#endif

    RCX = count;
  }
  else {
    // 32-bit address size: ECX is the counter; writing RCX zero-extends.
    Bit32u count = ECX;

    if (((--count) != 0) && (get_ZF()==0)) {
      branch_near64(i);
      BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, RIP);
    }
#if BX_INSTRUMENTATION
    else {
      BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
    }
#endif

    RCX = count;
  }

  BX_NEXT_TRACE(i);
}
// LOOPE: decrement the count register (RCX or ECX, selected by address
// size) and branch while it is non-zero and ZF=1. The decremented count
// is written back only after branch_near64() can no longer fault, so an
// exception restores the pre-instruction state (see comment above).
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::LOOPE64_Jb(bxInstruction_c *i)
{
  if (i->as64L()) {
    Bit64u count = RCX;

    if (((--count) != 0) && get_ZF()) {
      branch_near64(i);
      BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, RIP);
    }
#if BX_INSTRUMENTATION
    else {
      BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
    }
#endif

    RCX = count;
  }
  else {
    // 32-bit address size: ECX is the counter; writing RCX zero-extends.
    Bit32u count = ECX;

    if (((--count) != 0) && get_ZF()) {
      branch_near64(i);
      BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, RIP);
    }
#if BX_INSTRUMENTATION
    else {
      BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
    }
#endif

    RCX = count;
  }

  BX_NEXT_TRACE(i);
}
// LOOP: decrement the count register (RCX or ECX, selected by address
// size) and branch while it is non-zero. The decremented count is
// written back only after branch_near64() can no longer fault, so an
// exception restores the pre-instruction state (see comment above).
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::LOOP64_Jb(bxInstruction_c *i)
{
  if (i->as64L()) {
    Bit64u count = RCX;

    if ((--count) != 0) {
      branch_near64(i);
      BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, RIP);
    }
#if BX_INSTRUMENTATION
    else {
      BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
    }
#endif

    RCX = count;
  }
  else {
    // 32-bit address size: ECX is the counter; writing RCX zero-extends.
    Bit32u count = ECX;

    if ((--count) != 0) {
      branch_near64(i);
      BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, RIP);
    }
#if BX_INSTRUMENTATION
    else {
      BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
    }
#endif

    RCX = count;
  }

  BX_NEXT_TRACE(i);
}
- apply patch.ifdef-disabled-options. Comments from that patch are below: For a whole lot of configure options, I put #if...#endif around code that is specific to the option, even in files which are normally only compiled when the option is on. This allows me to create a MS Visual C++ 6.0 workspace that supports many of these options. The workspace will basically compile every file all the time, but the code for disabled options will be commented out by the #if...#endif. This may one day lead to simplification of the Makefiles and configure scripts, but for the moment I'm leaving Makefiles and configure scripts alone. Affected options: BX_SUPPORT_APIC (cpu/apic.cc) BX_SUPPORT_X86_64 (cpu/*64.cc) BX_DEBUGGER (debug/*) BX_DISASM (disasm/*) BX_WITH_nameofgui (gui/*) BX_SUPPORT_CDROM (iodev/cdrom.cc) BX_NE2K_SUPPORT (iodev/eth*.cc, iodev/ne2k.cc) BX_SUPPORT_APIC (iodev/ioapic.cc) BX_IODEBUG_SUPPORT (iodev/iodebug.cc) BX_PCI_SUPPORT (iodev/pci*.cc) BX_SUPPORT_SB16 (iodev/sb*.cc) Modified Files: cpu/apic.cc cpu/arith64.cc cpu/ctrl_xfer64.cc cpu/data_xfer64.cc cpu/fetchdecode64.cc cpu/logical64.cc cpu/mult64.cc cpu/resolve64.cc cpu/shift64.cc cpu/stack64.cc debug/Makefile.in debug/crc.cc debug/dbg_main.cc debug/lexer.l debug/linux.cc debug/parser.c debug/parser.y disasm/dis_decode.cc disasm/dis_groups.cc gui/amigaos.cc gui/beos.cc gui/carbon.cc gui/macintosh.cc gui/rfb.cc gui/sdl.cc gui/term.cc gui/win32.cc gui/wx.cc gui/wxdialog.cc gui/wxmain.cc gui/x.cc iodev/cdrom.cc iodev/eth.cc iodev/eth_arpback.cc iodev/eth_fbsd.cc iodev/eth_linux.cc iodev/eth_null.cc iodev/eth_packetmaker.cc iodev/eth_tap.cc iodev/eth_tuntap.cc iodev/eth_win32.cc iodev/ioapic.cc iodev/iodebug.cc iodev/ne2k.cc iodev/pci.cc iodev/pci2isa.cc iodev/sb16.cc iodev/soundlnx.cc iodev/soundwin.cc
2002-11-19 08:47:45 +03:00
#endif /* if BX_SUPPORT_X86_64 */