Fixed XLAT instruction for x86-64

Small optimization of lazy flags handling for the ADD/ADC/SUB/SBB instructions
Enable RETF64 for same privilege level return
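
A note on the lazy-flags change: the constants in lazy_flags.h are renumbered so that each BX_INSTR_ADC*/SBB* index sits exactly 4 above its BX_INSTR_ADD*/SUB* counterpart, which lets the per-instruction ternary in the arith*.cc files collapse into one arithmetic macro. A minimal standalone sketch of the idea (plain C, not Bochs source; the constant values are taken from or implied by the lazy_flags.h hunk at the end of this commit):

#include <assert.h>

#define BX_INSTR_ADD16         2
#define BX_INSTR_ADC16         6
/* same form as the new macro in lazy_flags.h, with the shift parenthesized */
#define BX_INSTR_ADD_ADC16(cf) (2 + ((cf)<<2))

int main(void)
{
  for (unsigned temp_CF = 0; temp_CF <= 1; temp_CF++) {
    /* old selection: branch on the carry flag */
    unsigned branching  = temp_CF ? BX_INSTR_ADC16 : BX_INSTR_ADD16;
    /* new selection: add CF*4 to the ADD index, no branch */
    unsigned branchless = BX_INSTR_ADD_ADC16(temp_CF);
    assert(branching == branchless);
  }
  return 0;
}
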
Stanislav Shwartsman 2005-07-21 01:59:05 +00:00
parent aceb8c683b
commit 51e03f071d
9 changed files with 140 additions and 147 deletions

arith16.cc

@ -1,5 +1,5 @@
/////////////////////////////////////////////////////////////////////////
// $Id: arith16.cc,v 1.40 2005-05-20 20:06:49 sshwarts Exp $
// $Id: arith16.cc,v 1.41 2005-07-21 01:59:03 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
// Copyright (C) 2001 MandrakeSoft S.A.
@ -146,8 +146,7 @@ void BX_CPU_C::ADC_EwGw(bxInstruction_c *i)
Write_RMW_virtual_word(sum_16);
}
SET_FLAGS_OSZAPC_16(op1_16, op2_16, sum_16,
(temp_CF) ? BX_INSTR_ADC16 : BX_INSTR_ADD16);
SET_FLAGS_OSZAPC_16(op1_16, op2_16, sum_16, BX_INSTR_ADD_ADC16(temp_CF));
}
void BX_CPU_C::ADC_GwEw(bxInstruction_c *i)
@ -168,8 +167,7 @@ void BX_CPU_C::ADC_GwEw(bxInstruction_c *i)
BX_WRITE_16BIT_REG(i->nnn(), sum_16);
SET_FLAGS_OSZAPC_16(op1_16, op2_16, sum_16,
(temp_CF) ? BX_INSTR_ADC16 : BX_INSTR_ADD16);
SET_FLAGS_OSZAPC_16(op1_16, op2_16, sum_16, BX_INSTR_ADD_ADC16(temp_CF));
}
void BX_CPU_C::ADC_AXIw(bxInstruction_c *i)
@ -184,8 +182,7 @@ void BX_CPU_C::ADC_AXIw(bxInstruction_c *i)
AX = sum_16;
SET_FLAGS_OSZAPC_16(op1_16, op2_16, sum_16,
(temp_CF) ? BX_INSTR_ADC16 : BX_INSTR_ADD16);
SET_FLAGS_OSZAPC_16(op1_16, op2_16, sum_16, BX_INSTR_ADD_ADC16(temp_CF));
}
void BX_CPU_C::SBB_EwGw(bxInstruction_c *i)
@ -206,8 +203,7 @@ void BX_CPU_C::SBB_EwGw(bxInstruction_c *i)
Write_RMW_virtual_word(diff_16);
}
SET_FLAGS_OSZAPC_16(op1_16, op2_16, diff_16,
(temp_CF) ? BX_INSTR_SBB16 : BX_INSTR_SUB16);
SET_FLAGS_OSZAPC_16(op1_16, op2_16, diff_16, BX_INSTR_SUB_SBB16(temp_CF));
}
void BX_CPU_C::SBB_GwEw(bxInstruction_c *i)
@ -228,8 +224,7 @@ void BX_CPU_C::SBB_GwEw(bxInstruction_c *i)
BX_WRITE_16BIT_REG(i->nnn(), diff_16);
SET_FLAGS_OSZAPC_16(op1_16, op2_16, diff_16,
(temp_CF) ? BX_INSTR_SBB16 : BX_INSTR_SUB16);
SET_FLAGS_OSZAPC_16(op1_16, op2_16, diff_16, BX_INSTR_SUB_SBB16(temp_CF));
}
void BX_CPU_C::SBB_AXIw(bxInstruction_c *i)
@ -243,8 +238,7 @@ void BX_CPU_C::SBB_AXIw(bxInstruction_c *i)
AX = diff_16;
SET_FLAGS_OSZAPC_16(op1_16, op2_16, diff_16,
(temp_CF) ? BX_INSTR_SBB16 : BX_INSTR_SUB16);
SET_FLAGS_OSZAPC_16(op1_16, op2_16, diff_16, BX_INSTR_SUB_SBB16(temp_CF));
}
void BX_CPU_C::SBB_EwIw(bxInstruction_c *i)
@ -265,8 +259,7 @@ void BX_CPU_C::SBB_EwIw(bxInstruction_c *i)
Write_RMW_virtual_word(diff_16);
}
SET_FLAGS_OSZAPC_16(op1_16, op2_16, diff_16,
(temp_CF) ? BX_INSTR_SBB16 : BX_INSTR_SUB16);
SET_FLAGS_OSZAPC_16(op1_16, op2_16, diff_16, BX_INSTR_SUB_SBB16(temp_CF));
}
void BX_CPU_C::SUB_EwGw(bxInstruction_c *i)
@ -525,8 +518,7 @@ void BX_CPU_C::ADC_EwIw(bxInstruction_c *i)
Write_RMW_virtual_word(sum_16);
}
SET_FLAGS_OSZAPC_16(op1_16, op2_16, sum_16,
(temp_CF) ? BX_INSTR_ADC16 : BX_INSTR_ADD16);
SET_FLAGS_OSZAPC_16(op1_16, op2_16, sum_16, BX_INSTR_ADD_ADC16(temp_CF));
}
void BX_CPU_C::SUB_EwIw(bxInstruction_c *i)
@ -636,7 +628,7 @@ void BX_CPU_C::DEC_Ew(bxInstruction_c *i)
op1_16--;
BX_WRITE_16BIT_REG(i->rm(), op1_16);
#endif
}
}
else {
read_RMW_virtual_word(i->seg(), RMAddr(i), &op1_16);
#if defined(BX_HostAsm_Dec16)

arith32.cc

@ -1,5 +1,5 @@
/////////////////////////////////////////////////////////////////////////
// $Id: arith32.cc,v 1.45 2005-05-19 20:25:14 sshwarts Exp $
// $Id: arith32.cc,v 1.46 2005-07-21 01:59:03 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
// Copyright (C) 2001 MandrakeSoft S.A.
@ -163,8 +163,7 @@ void BX_CPU_C::ADC_EdGd(bxInstruction_c *i)
Write_RMW_virtual_dword(sum_32);
}
SET_FLAGS_OSZAPC_32(op1_32, op2_32, sum_32,
(temp_CF) ? BX_INSTR_ADC32 : BX_INSTR_ADD32);
SET_FLAGS_OSZAPC_32(op1_32, op2_32, sum_32, BX_INSTR_ADD_ADC32(temp_CF));
}
void BX_CPU_C::ADC_GdEd(bxInstruction_c *i)
@ -186,8 +185,7 @@ void BX_CPU_C::ADC_GdEd(bxInstruction_c *i)
BX_WRITE_32BIT_REGZ(i->nnn(), sum_32);
SET_FLAGS_OSZAPC_32(op1_32, op2_32, sum_32,
(temp_CF) ? BX_INSTR_ADC32 : BX_INSTR_ADD32);
SET_FLAGS_OSZAPC_32(op1_32, op2_32, sum_32, BX_INSTR_ADD_ADC32(temp_CF));
}
void BX_CPU_C::ADC_EAXId(bxInstruction_c *i)
@ -202,8 +200,7 @@ void BX_CPU_C::ADC_EAXId(bxInstruction_c *i)
RAX = sum_32;
SET_FLAGS_OSZAPC_32(op1_32, op2_32, sum_32,
(temp_CF) ? BX_INSTR_ADC32 : BX_INSTR_ADD32);
SET_FLAGS_OSZAPC_32(op1_32, op2_32, sum_32, BX_INSTR_ADD_ADC32(temp_CF));
}
void BX_CPU_C::SBB_EdGd(bxInstruction_c *i)
@ -225,8 +222,7 @@ void BX_CPU_C::SBB_EdGd(bxInstruction_c *i)
Write_RMW_virtual_dword(diff_32);
}
SET_FLAGS_OSZAPC_32(op1_32, op2_32, diff_32,
(temp_CF) ? BX_INSTR_SBB32 : BX_INSTR_SUB32);
SET_FLAGS_OSZAPC_32(op1_32, op2_32, diff_32, BX_INSTR_SUB_SBB32(temp_CF));
}
void BX_CPU_C::SBB_GdEd(bxInstruction_c *i)
@ -248,8 +244,7 @@ void BX_CPU_C::SBB_GdEd(bxInstruction_c *i)
BX_WRITE_32BIT_REGZ(i->nnn(), diff_32);
SET_FLAGS_OSZAPC_32(op1_32, op2_32, diff_32,
(temp_CF) ? BX_INSTR_SBB32 : BX_INSTR_SUB32);
SET_FLAGS_OSZAPC_32(op1_32, op2_32, diff_32, BX_INSTR_SUB_SBB32(temp_CF));
}
void BX_CPU_C::SBB_EAXId(bxInstruction_c *i)
@ -264,8 +259,7 @@ void BX_CPU_C::SBB_EAXId(bxInstruction_c *i)
RAX = diff_32;
SET_FLAGS_OSZAPC_32(op1_32, op2_32, diff_32,
(temp_CF) ? BX_INSTR_SBB32 : BX_INSTR_SUB32);
SET_FLAGS_OSZAPC_32(op1_32, op2_32, diff_32, BX_INSTR_SUB_SBB32(temp_CF));
}
void BX_CPU_C::SBB_EdId(bxInstruction_c *i)
@ -287,8 +281,7 @@ void BX_CPU_C::SBB_EdId(bxInstruction_c *i)
Write_RMW_virtual_dword(diff_32);
}
SET_FLAGS_OSZAPC_32(op1_32, op2_32, diff_32,
(temp_CF) ? BX_INSTR_SBB32 : BX_INSTR_SUB32);
SET_FLAGS_OSZAPC_32(op1_32, op2_32, diff_32, BX_INSTR_SUB_SBB32(temp_CF));
}
void BX_CPU_C::SUB_EdGd(bxInstruction_c *i)
@ -553,8 +546,7 @@ void BX_CPU_C::ADC_EdId(bxInstruction_c *i)
Write_RMW_virtual_dword(sum_32);
}
SET_FLAGS_OSZAPC_32(op1_32, op2_32, sum_32,
(temp_CF) ? BX_INSTR_ADC32 : BX_INSTR_ADD32);
SET_FLAGS_OSZAPC_32(op1_32, op2_32, sum_32, BX_INSTR_ADD_ADC32(temp_CF));
}
void BX_CPU_C::SUB_EdId(bxInstruction_c *i)

arith64.cc

@ -1,5 +1,5 @@
/////////////////////////////////////////////////////////////////////////
// $Id: arith64.cc,v 1.26 2005-05-19 20:25:15 sshwarts Exp $
// $Id: arith64.cc,v 1.27 2005-07-21 01:59:03 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
// Copyright (C) 2001 MandrakeSoft S.A.
@ -117,8 +117,7 @@ void BX_CPU_C::ADC_EqGq(bxInstruction_c *i)
Write_RMW_virtual_qword(sum_64);
}
SET_FLAGS_OSZAPC_64(op1_64, op2_64, sum_64,
(temp_CF) ? BX_INSTR_ADC64 : BX_INSTR_ADD64);
SET_FLAGS_OSZAPC_64(op1_64, op2_64, sum_64, BX_INSTR_ADD_ADC64(temp_CF));
}
void BX_CPU_C::ADC_GqEq(bxInstruction_c *i)
@ -144,8 +143,7 @@ void BX_CPU_C::ADC_GqEq(bxInstruction_c *i)
/* now write sum back to destination */
BX_WRITE_64BIT_REG(i->nnn(), sum_64);
SET_FLAGS_OSZAPC_64(op1_64, op2_64, sum_64,
(temp_CF) ? BX_INSTR_ADC64 : BX_INSTR_ADD64);
SET_FLAGS_OSZAPC_64(op1_64, op2_64, sum_64, BX_INSTR_ADD_ADC64(temp_CF));
}
void BX_CPU_C::ADC_RAXId(bxInstruction_c *i)
@ -162,8 +160,7 @@ void BX_CPU_C::ADC_RAXId(bxInstruction_c *i)
/* now write sum back to destination */
RAX = sum_64;
SET_FLAGS_OSZAPC_64(op1_64, op2_64, sum_64,
(temp_CF) ? BX_INSTR_ADC64 : BX_INSTR_ADD64);
SET_FLAGS_OSZAPC_64(op1_64, op2_64, sum_64, BX_INSTR_ADD_ADC64(temp_CF));
}
void BX_CPU_C::SBB_EqGq(bxInstruction_c *i)
@ -188,8 +185,7 @@ void BX_CPU_C::SBB_EqGq(bxInstruction_c *i)
Write_RMW_virtual_qword(diff_64);
}
SET_FLAGS_OSZAPC_64(op1_64, op2_64, diff_64,
(temp_CF) ? BX_INSTR_SBB64 : BX_INSTR_SUB64);
SET_FLAGS_OSZAPC_64(op1_64, op2_64, diff_64, BX_INSTR_SUB_SBB64(temp_CF));
}
void BX_CPU_C::SBB_GqEq(bxInstruction_c *i)
@ -215,8 +211,7 @@ void BX_CPU_C::SBB_GqEq(bxInstruction_c *i)
/* now write diff back to destination */
BX_WRITE_64BIT_REG(i->nnn(), diff_64);
SET_FLAGS_OSZAPC_64(op1_64, op2_64, diff_64,
(temp_CF) ? BX_INSTR_SBB64 : BX_INSTR_SUB64);
SET_FLAGS_OSZAPC_64(op1_64, op2_64, diff_64, BX_INSTR_SUB_SBB64(temp_CF));
}
void BX_CPU_C::SBB_RAXId(bxInstruction_c *i)
@ -233,8 +228,7 @@ void BX_CPU_C::SBB_RAXId(bxInstruction_c *i)
/* now write diff back to destination */
RAX = diff_64;
SET_FLAGS_OSZAPC_64(op1_64, op2_64, diff_64,
(temp_CF) ? BX_INSTR_SBB64 : BX_INSTR_SUB64);
SET_FLAGS_OSZAPC_64(op1_64, op2_64, diff_64, BX_INSTR_SUB_SBB64(temp_CF));
}
void BX_CPU_C::SBB_EqId(bxInstruction_c *i)
@ -259,8 +253,7 @@ void BX_CPU_C::SBB_EqId(bxInstruction_c *i)
Write_RMW_virtual_qword(diff_64);
}
SET_FLAGS_OSZAPC_64(op1_64, op2_64, diff_64,
(temp_CF) ? BX_INSTR_SBB64 : BX_INSTR_SUB64);
SET_FLAGS_OSZAPC_64(op1_64, op2_64, diff_64, BX_INSTR_SUB_SBB64(temp_CF));
}
void BX_CPU_C::SUB_EqGq(bxInstruction_c *i)
@ -481,8 +474,7 @@ void BX_CPU_C::ADC_EqId(bxInstruction_c *i)
Write_RMW_virtual_qword(sum_64);
}
SET_FLAGS_OSZAPC_64(op1_64, op2_64, sum_64,
(temp_CF) ? BX_INSTR_ADC64 : BX_INSTR_ADD64);
SET_FLAGS_OSZAPC_64(op1_64, op2_64, sum_64, BX_INSTR_ADD_ADC64(temp_CF));
}
void BX_CPU_C::SUB_EqId(bxInstruction_c *i)

arith8.cc

@ -1,5 +1,5 @@
/////////////////////////////////////////////////////////////////////////
// $Id: arith8.cc,v 1.36 2005-05-20 20:06:50 sshwarts Exp $
// $Id: arith8.cc,v 1.37 2005-07-21 01:59:03 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
// Copyright (C) 2001 MandrakeSoft S.A.
@ -100,8 +100,7 @@ void BX_CPU_C::ADC_EbGb(bxInstruction_c *i)
Write_RMW_virtual_byte(sum);
}
SET_FLAGS_OSZAPC_8(op1, op2, sum,
(temp_CF) ? BX_INSTR_ADC8 : BX_INSTR_ADD8);
SET_FLAGS_OSZAPC_8(op1, op2, sum, BX_INSTR_ADD_ADC8(temp_CF));
}
void BX_CPU_C::ADC_GbEb(bxInstruction_c *i)
@ -120,8 +119,7 @@ void BX_CPU_C::ADC_GbEb(bxInstruction_c *i)
sum = op1 + op2 + temp_CF;
SET_FLAGS_OSZAPC_8(op1, op2, sum,
(temp_CF) ? BX_INSTR_ADC8 : BX_INSTR_ADD8);
SET_FLAGS_OSZAPC_8(op1, op2, sum, BX_INSTR_ADD_ADC8(temp_CF));
BX_WRITE_8BIT_REGx(i->nnn(), i->extend8bitL(), sum);
}
@ -136,8 +134,7 @@ void BX_CPU_C::ADC_ALIb(bxInstruction_c *i)
sum = op1 + op2 + temp_CF;
AL = sum;
SET_FLAGS_OSZAPC_8(op1, op2, sum,
(temp_CF) ? BX_INSTR_ADC8 : BX_INSTR_ADD8);
SET_FLAGS_OSZAPC_8(op1, op2, sum, BX_INSTR_ADD_ADC8(temp_CF));
}
void BX_CPU_C::SBB_EbGb(bxInstruction_c *i)
@ -158,8 +155,7 @@ void BX_CPU_C::SBB_EbGb(bxInstruction_c *i)
Write_RMW_virtual_byte(diff_8);
}
SET_FLAGS_OSZAPC_8(op1_8, op2_8, diff_8,
(temp_CF) ? BX_INSTR_SBB8 : BX_INSTR_SUB8);
SET_FLAGS_OSZAPC_8(op1_8, op2_8, diff_8, BX_INSTR_SUB_SBB8(temp_CF));
}
void BX_CPU_C::SBB_GbEb(bxInstruction_c *i)
@ -180,8 +176,7 @@ void BX_CPU_C::SBB_GbEb(bxInstruction_c *i)
BX_WRITE_8BIT_REGx(i->nnn(), i->extend8bitL(), diff_8);
SET_FLAGS_OSZAPC_8(op1_8, op2_8, diff_8,
(temp_CF) ? BX_INSTR_SBB8 : BX_INSTR_SUB8);
SET_FLAGS_OSZAPC_8(op1_8, op2_8, diff_8, BX_INSTR_SUB_SBB8(temp_CF));
}
void BX_CPU_C::SBB_ALIb(bxInstruction_c *i)
@ -194,8 +189,7 @@ void BX_CPU_C::SBB_ALIb(bxInstruction_c *i)
diff_8 = op1_8 - (op2_8 + temp_CF);
AL = diff_8;
SET_FLAGS_OSZAPC_8(op1_8, op2_8, diff_8,
(temp_CF) ? BX_INSTR_SBB8 : BX_INSTR_SUB8);
SET_FLAGS_OSZAPC_8(op1_8, op2_8, diff_8, BX_INSTR_SUB_SBB8(temp_CF));
}
void BX_CPU_C::SBB_EbIb(bxInstruction_c *i)
@ -216,8 +210,7 @@ void BX_CPU_C::SBB_EbIb(bxInstruction_c *i)
Write_RMW_virtual_byte(diff_8);
}
SET_FLAGS_OSZAPC_8(op1_8, op2_8, diff_8,
(temp_CF) ? BX_INSTR_SBB8 : BX_INSTR_SUB8);
SET_FLAGS_OSZAPC_8(op1_8, op2_8, diff_8, BX_INSTR_SUB_SBB8(temp_CF));
}
void BX_CPU_C::SUB_EbGb(bxInstruction_c *i)
@ -409,8 +402,7 @@ void BX_CPU_C::ADC_EbIb(bxInstruction_c *i)
Write_RMW_virtual_byte(sum);
}
SET_FLAGS_OSZAPC_8(op1, op2, sum,
(temp_CF) ? BX_INSTR_ADC8 : BX_INSTR_ADD8);
SET_FLAGS_OSZAPC_8(op1, op2, sum, BX_INSTR_ADD_ADC8(temp_CF));
}
void BX_CPU_C::SUB_EbIb(bxInstruction_c *i)

cpu.h

@ -1,5 +1,5 @@
/////////////////////////////////////////////////////////////////////////
// $Id: cpu.h,v 1.224 2005-07-10 20:32:22 sshwarts Exp $
// $Id: cpu.h,v 1.225 2005-07-21 01:59:03 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
// Copyright (C) 2001 MandrakeSoft S.A.
@ -2712,9 +2712,6 @@ public: // for now...
BX_SMF void call_protected(bxInstruction_c *, Bit16u cs, bx_address disp) BX_CPP_AttrRegparmN(3);
BX_SMF void return_protected(bxInstruction_c *, Bit16u pop_bytes) BX_CPP_AttrRegparmN(2);
BX_SMF void iret_protected(bxInstruction_c *) BX_CPP_AttrRegparmN(1);
#if BX_SUPPORT_X86_64
BX_SMF void long_return(bxInstruction_c *, Bit16u pop_bytes) BX_CPP_AttrRegparmN(2);
#endif
BX_SMF void validate_seg_regs(void);
BX_SMF void stack_return_to_v86(Bit32u new_eip, Bit32u raw_cs_selector,
Bit32u flags32);

ctrl_xfer64.cc

@ -1,5 +1,5 @@
/////////////////////////////////////////////////////////////////////////
// $Id: ctrl_xfer64.cc,v 1.36 2005-07-20 01:26:45 sshwarts Exp $
// $Id: ctrl_xfer64.cc,v 1.37 2005-07-21 01:59:04 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
// Copyright (C) 2001 MandrakeSoft S.A.
@ -90,8 +90,6 @@ void BX_CPU_C::RETnear64(bxInstruction_c *i)
void BX_CPU_C::RETfar64_Iw(bxInstruction_c *i)
{
Bit64u rip, rcs_raw;
invalidate_prefetch_q();
#if BX_DEBUGGER
@ -100,7 +98,9 @@ void BX_CPU_C::RETfar64_Iw(bxInstruction_c *i)
BX_ASSERT(protected_mode());
long_return(i, i->Iw());
BX_INFO(("RETF64_Iw instruction executed ..."));
return_protected(i, i->Iw());
BX_INSTR_FAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_RET,
BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value, BX_CPU_THIS_PTR rip);
@ -108,8 +108,6 @@ void BX_CPU_C::RETfar64_Iw(bxInstruction_c *i)
void BX_CPU_C::RETfar64(bxInstruction_c *i)
{
Bit64u rip, rcs_raw;
invalidate_prefetch_q();
#if BX_DEBUGGER
@ -118,7 +116,9 @@ void BX_CPU_C::RETfar64(bxInstruction_c *i)
BX_ASSERT(protected_mode());
long_return(i, 0);
BX_INFO(("RETF64 instruction executed ..."));
return_protected(i, 0);
BX_INSTR_FAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_RET,
BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value, BX_CPU_THIS_PTR rip);
@ -291,6 +291,8 @@ void BX_CPU_C::JMP64_Ep(bxInstruction_c *i)
read_virtual_dword(i->seg(), RMAddr(i), &op1_32);
read_virtual_word(i->seg(), RMAddr(i)+4, &cs_raw);
BX_INFO(("JMPF64 instruction executed ..."));
BX_ASSERT(protected_mode());
BX_CPU_THIS_PTR jump_protected(i, cs_raw, op1_32);
@ -310,7 +312,6 @@ void BX_CPU_C::IRET64(bxInstruction_c *i)
BX_ASSERT(protected_mode());
iret_protected(i);
done:
BX_INSTR_FAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_IRET,
BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value, BX_CPU_THIS_PTR rip);
}

ctrl_xfer_pro.cc

@ -1,5 +1,5 @@
////////////////////////////////////////////////////////////////////////
// $Id: ctrl_xfer_pro.cc,v 1.43 2005-07-20 01:26:45 sshwarts Exp $
// $Id: ctrl_xfer_pro.cc,v 1.44 2005-07-21 01:59:04 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
// Copyright (C) 2001 MandrakeSoft S.A.
@ -359,7 +359,7 @@ BX_CPU_C::jump_protected(bxInstruction_c *i, Bit16u cs_raw, bx_address dispBig)
if ( descriptor.u.gate386.dest_offset >
gate_cs_descriptor.u.segment.limit_scaled )
{
BX_ERROR(("jump_protected: IP > limit"));
BX_ERROR(("jump_protected: EIP > limit"));
exception(BX_GP_EXCEPTION, 0, 0);
}
@ -489,14 +489,11 @@ BX_CPU_C::call_protected(bxInstruction_c *i, Bit16u cs_raw, bx_address dispBig)
// load eIP with new offset
load_cs(&cs_selector, &cs_descriptor, CPL);
RIP = dispBig;
#if BX_SUPPORT_X86_64
// not sure about this ???? - fix to work in long mode
if (cs_descriptor.u.segment.d_b==0 && cs_descriptor.u.segment.l==0)
EIP &= 0x0000ffff;
#else
if (cs_descriptor.u.segment.d_b==0)
EIP &= 0x0000ffff;
#endif
if (cs_descriptor.u.segment.d_b==0 && !IS_LONG64_SEGMENT(cs_descriptor))
RIP &= 0x0000ffff;
return;
}
else { // gate & special segment
@ -652,8 +649,7 @@ BX_CPU_C::call_protected(bxInstruction_c *i, Bit16u cs_raw, bx_address dispBig)
// else #GP(code segment selector)
// DPL of selected descriptor must be <= CPL,
// else #GP(code segment selector)
if (cs_descriptor.valid==0 ||
cs_descriptor.segment==0 ||
if (cs_descriptor.valid==0 || cs_descriptor.segment==0 ||
cs_descriptor.u.segment.executable==0 ||
cs_descriptor.dpl > CPL)
{
@ -727,8 +723,7 @@ BX_CPU_C::call_protected(bxInstruction_c *i, Bit16u cs_raw, bx_address dispBig)
// descriptor must indicate writable data segment,
// else #TS(SS selector)
if (ss_descriptor.valid==0 ||
ss_descriptor.segment==0 ||
if (ss_descriptor.valid==0 || ss_descriptor.segment==0 ||
ss_descriptor.u.segment.executable ||
ss_descriptor.u.segment.r_w==0)
{
@ -932,16 +927,6 @@ BX_CPU_C::call_protected(bxInstruction_c *i, Bit16u cs_raw, bx_address dispBig)
BX_PANIC(("call_protected: shouldn't get here!"));
}
#if BX_SUPPORT_X86_64
void BX_CPP_AttrRegparmN(2)
BX_CPU_C::long_return(bxInstruction_c *i, Bit16u pop_bytes)
{
BX_PANIC(("Return protected is not implemented in x86-64 mode !"));
}
#endif
void BX_CPP_AttrRegparmN(2)
BX_CPU_C::return_protected(bxInstruction_c *i, Bit16u pop_bytes)
{
@ -1012,7 +997,7 @@ BX_CPU_C::return_protected(bxInstruction_c *i, Bit16u pop_bytes)
stack_cs_offset, 2, CPL==3, BX_READ, &raw_cs_selector);
parse_selector(raw_cs_selector, &cs_selector);
if ( cs_selector.rpl < CPL ) {
if (cs_selector.rpl < CPL) {
BX_ERROR(("return_protected: CS.rpl < CPL"));
exception(BX_GP_EXCEPTION, raw_cs_selector & 0xfffc, 0);
return;
@ -1020,13 +1005,14 @@ BX_CPU_C::return_protected(bxInstruction_c *i, Bit16u pop_bytes)
// if return selector RPL == CPL then
// RETURN TO SAME PRIVILEGE LEVEL
if ( cs_selector.rpl == CPL ) {
//BX_INFO(("return: to same level %04x:%08x",
if (cs_selector.rpl == CPL)
{
// BX_INFO(("return: to same level %04x:%08x",
// BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value,
// BX_CPU_THIS_PTR prev_eip));
// return selector must be non-null, else #GP(0) (???)
if ( (raw_cs_selector & 0xfffc) == 0 ) {
if ((raw_cs_selector & 0xfffc) == 0) {
BX_INFO(("return_protected: CS null"));
exception(BX_GP_EXCEPTION, 0, 0);
return;
@ -1039,8 +1025,7 @@ BX_CPU_C::return_protected(bxInstruction_c *i, Bit16u pop_bytes)
// descriptor AR byte must indicate code segment, else #GP(selector)
parse_descriptor(dword1, dword2, &cs_descriptor);
if (cs_descriptor.valid==0 ||
cs_descriptor.segment==0 ||
if (cs_descriptor.valid==0 || cs_descriptor.segment==0 ||
cs_descriptor.u.segment.executable==0)
{
BX_INFO(("return_protected: same: AR byte not code"));
@ -1102,18 +1087,28 @@ BX_CPU_C::return_protected(bxInstruction_c *i, Bit16u pop_bytes)
}
// EIP must be in code segment limit, else #GP(0)
if ( return_RIP > cs_descriptor.u.segment.limit_scaled ) {
BX_ERROR(("return_protected: return RIP > CS.limit"));
exception(BX_GP_EXCEPTION, 0, 0);
return;
#if BX_SUPPORT_X86_64
if (IsLongMode()) {
if (! IsCanonical(return_RIP)) {
BX_ERROR(("branch_near64: canonical RIP violation"));
exception(BX_GP_EXCEPTION, 0, 0);
}
}
else
#endif
{
if (return_RIP > cs_descriptor.u.segment.limit_scaled) {
BX_ERROR(("return_protected: return RIP > CS.limit"));
exception(BX_GP_EXCEPTION, 0, 0);
}
}
// load CS:EIP from stack
// load CS register with descriptor
// increment eSP
load_cs(&cs_selector, &cs_descriptor, CPL);
RIP = return_RIP;
// increment eSP
#if BX_SUPPORT_X86_64
if (IsLongMode()) RSP += stack_param_offset + pop_bytes;
else
@ -1167,7 +1162,7 @@ BX_CPU_C::return_protected(bxInstruction_c *i, Bit16u pop_bytes)
/* examine return CS selector and associated descriptor */
/* selector must be non-null else #GP(0) (???) */
/* selector must be non-null else #GP(0) */
if ( (raw_cs_selector & 0xfffc) == 0 ) {
BX_INFO(("return_protected: CS selector null"));
exception(BX_GP_EXCEPTION, 0, 0);
@ -1179,7 +1174,7 @@ BX_CPU_C::return_protected(bxInstruction_c *i, Bit16u pop_bytes)
fetch_raw_descriptor(&cs_selector, &dword1, &dword2, BX_GP_EXCEPTION);
parse_descriptor(dword1, dword2, &cs_descriptor);
/* descriptor AR byte must indicate code segment else #GP(selector) (???) */
/* descriptor AR byte must indicate code segment else #GP(selector) */
if (cs_descriptor.valid==0 ||
cs_descriptor.segment==0 ||
cs_descriptor.u.segment.executable==0)
@ -1216,6 +1211,17 @@ BX_CPU_C::return_protected(bxInstruction_c *i, Bit16u pop_bytes)
}
/* examine return SS selector and associated descriptor: */
#if BX_SUPPORT_X86_64
if (i->os64L()) {
access_linear(BX_CPU_THIS_PTR get_segment_base(BX_SEG_REG_SS) + temp_RSP + 24 + pop_bytes,
2, 0, BX_READ, &raw_ss_selector);
access_linear(BX_CPU_THIS_PTR get_segment_base(BX_SEG_REG_SS) + temp_RSP + 16 + pop_bytes,
8, 0, BX_READ, &return_RSP);
access_linear(BX_CPU_THIS_PTR get_segment_base(BX_SEG_REG_SS) + temp_RSP + 0,
8, 0, BX_READ, &return_RIP);
}
else
#endif
if (i->os32L()) {
Bit32u return_EIP, return_ESP;
access_linear(BX_CPU_THIS_PTR get_segment_base(BX_SEG_REG_SS) + temp_RSP + 12 + pop_bytes,
@ -1229,7 +1235,6 @@ BX_CPU_C::return_protected(bxInstruction_c *i, Bit16u pop_bytes)
}
else {
Bit16u return_SP, return_IP;
access_linear(BX_CPU_THIS_PTR get_segment_base(BX_SEG_REG_SS) + temp_RSP + 6 + pop_bytes,
2, 0, BX_READ, &raw_ss_selector);
access_linear(BX_CPU_THIS_PTR get_segment_base(BX_SEG_REG_SS) + temp_RSP + 4 + pop_bytes,
@ -1262,9 +1267,8 @@ BX_CPU_C::return_protected(bxInstruction_c *i, Bit16u pop_bytes)
}
/* descriptor AR byte must indicate a writable data segment,
* else #GP(selector) (???) */
if (ss_descriptor.valid==0 ||
ss_descriptor.segment==0 ||
* else #GP(selector) */
if (ss_descriptor.valid==0 || ss_descriptor.segment==0 ||
ss_descriptor.u.segment.executable ||
ss_descriptor.u.segment.r_w==0)
{
@ -1289,10 +1293,20 @@ BX_CPU_C::return_protected(bxInstruction_c *i, Bit16u pop_bytes)
}
/* EIP must be in code segment limit, else #GP(0) */
if (return_RIP > cs_descriptor.u.segment.limit_scaled) {
BX_ERROR(("return_protected: EIP > CS.limit"));
exception(BX_GP_EXCEPTION, 0, 0);
return;
#if BX_SUPPORT_X86_64
if (IsLongMode()) {
if (! IsCanonical(return_RIP)) {
BX_ERROR(("branch_near64: canonical RIP violation"));
exception(BX_GP_EXCEPTION, 0, 0);
}
}
else
#endif
{
if (return_RIP > cs_descriptor.u.segment.limit_scaled) {
BX_ERROR(("return_protected: EIP > CS.limit"));
exception(BX_GP_EXCEPTION, 0, 0);
}
}
/* set CPL to RPL of return CS selector */
@ -1300,13 +1314,13 @@ BX_CPU_C::return_protected(bxInstruction_c *i, Bit16u pop_bytes)
/* set CS RPL to CPL */
/* load the CS-cache with return CS descriptor */
load_cs(&cs_selector, &cs_descriptor, cs_selector.rpl);
EIP = return_RIP;
RIP = return_RIP;
/* load SS:SP from stack */
/* load SS-cache with return SS descriptor */
load_ss(&ss_selector, &ss_descriptor, cs_selector.rpl);
if (ss_descriptor.u.segment.d_b)
ESP = return_RSP + pop_bytes;
RSP = return_RSP + pop_bytes;
else
SP = (Bit16u) return_RSP + pop_bytes;

data_xfer8.cc

@ -1,5 +1,5 @@
/////////////////////////////////////////////////////////////////////////
// $Id: data_xfer8.cc,v 1.21 2005-06-21 17:01:18 sshwarts Exp $
// $Id: data_xfer8.cc,v 1.22 2005-07-21 01:59:05 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
// Copyright (C) 2001 MandrakeSoft S.A.
@ -86,23 +86,26 @@ void BX_CPU_C::MOV_EbIb(bxInstruction_c *i)
void BX_CPU_C::XLAT(bxInstruction_c *i)
{
Bit32u offset_32;
bx_address offset;
#if BX_CPU_LEVEL >= 3
if (i->as32L()) {
offset_32 = EBX + AL;
#if BX_SUPPORT_X86_64
if (i->as64L()) {
offset = RBX + AL;
}
else
#endif /* BX_CPU_LEVEL >= 3 */
{
offset_32 = BX + AL;
#endif
if (i->as32L()) {
offset = EBX + AL;
}
else {
offset = BX + AL;
}
if (!BX_NULL_SEG_REG(i->seg())) {
read_virtual_byte(i->seg(), offset_32, &AL);
read_virtual_byte(i->seg(), offset, &AL);
}
else {
read_virtual_byte(BX_SEG_REG_DS, offset_32, &AL);
read_virtual_byte(BX_SEG_REG_DS, offset, &AL);
}
}
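
The core of the XLAT fix above is the switch from a Bit32u offset to a bx_address, so that with a 64-bit address size the RBX + AL table offset is no longer truncated to 32 bits. A standalone illustration (plain C, not Bochs source; the table base is a made-up value, and bx_address is assumed to be a 64-bit integer in x86-64-enabled builds):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t bx_address;   /* assumption: 64-bit in x86-64-enabled builds */

int main(void)
{
  uint64_t RBX = 0x0000000180001000ULL;  /* hypothetical table base above 4 GB */
  uint8_t  AL  = 0x5A;

  uint32_t   offset_32 = RBX + AL;   /* old code: upper 32 bits are lost */
  bx_address offset    = RBX + AL;   /* new code: full 64-bit offset kept */

  printf("old offset: 0x%08" PRIx32 "\nnew offset: 0x%016" PRIx64 "\n",
         offset_32, offset);
  return 0;
}
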

lazy_flags.h

@ -1,5 +1,5 @@
/////////////////////////////////////////////////////////////////////////
// $Id: lazy_flags.h,v 1.19 2004-09-04 10:21:15 sshwarts Exp $
// $Id: lazy_flags.h,v 1.20 2005-07-21 01:59:05 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
// Copyright (C) 2001 MandrakeSoft S.A.
@ -32,16 +32,21 @@
#define BX_INSTR_ADD32 3
#define BX_INSTR_ADD64 4
#define BX_INSTR_SUB8 5
#define BX_INSTR_SUB16 6
#define BX_INSTR_SUB32 7
#define BX_INSTR_SUB64 8
// used only if CF = 1 when executing ADC instruction
#define BX_INSTR_ADC8 9
#define BX_INSTR_ADC16 10
#define BX_INSTR_ADC32 11
#define BX_INSTR_ADC64 12
#define BX_INSTR_ADC8 5
#define BX_INSTR_ADC16 6
#define BX_INSTR_ADC32 7
#define BX_INSTR_ADC64 8
#define BX_INSTR_ADD_ADC8(cf)  (1 + ((cf)<<2))
#define BX_INSTR_ADD_ADC16(cf) (2 + ((cf)<<2))
#define BX_INSTR_ADD_ADC32(cf) (3 + ((cf)<<2))
#define BX_INSTR_ADD_ADC64(cf) (4 + ((cf)<<2))
#define BX_INSTR_SUB8 9
#define BX_INSTR_SUB16 10
#define BX_INSTR_SUB32 11
#define BX_INSTR_SUB64 12
// used only if CF = 1 when executing SBB instruction
#define BX_INSTR_SBB8 13
@ -49,6 +54,11 @@
#define BX_INSTR_SBB32 15
#define BX_INSTR_SBB64 16
#define BX_INSTR_SUB_SBB8(cf)  (9 + ((cf)<<2))
#define BX_INSTR_SUB_SBB16(cf) (10 + ((cf)<<2))
#define BX_INSTR_SUB_SBB32(cf) (11 + ((cf)<<2))
#define BX_INSTR_SUB_SBB64(cf) (12 + ((cf)<<2))
#define BX_INSTR_INC8 17
#define BX_INSTR_INC16 18
#define BX_INSTR_INC32 19