Move duplicated code to a separate function

And fix a bug introduced by a previous merge
This commit is contained in:
Stanislav Shwartsman 2005-04-17 21:51:59 +00:00
parent 6fa52214b0
commit caa0648188
3 changed files with 79 additions and 126 deletions

View File

@ -1,5 +1,5 @@
/////////////////////////////////////////////////////////////////////////
// $Id: cpu.h,v 1.218 2005-04-17 18:54:54 sshwarts Exp $
// $Id: cpu.h,v 1.219 2005-04-17 21:51:58 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
// Copyright (C) 2001 MandrakeSoft S.A.
@ -2609,8 +2609,6 @@ public: // for now...
BX_SMF void cpu_loop(Bit32s max_instr_count);
BX_SMF unsigned handleAsyncEvent(void);
BX_SMF void boundaryFetch(Bit8u *fetchPtr, unsigned remainingInPage, bxInstruction_c *i);
BX_SMF void branch_near32(Bit32u new_eip) BX_CPP_AttrRegparmN(1);
BX_SMF void prefetch(void);
// revalidate_prefetch_q is now a no-op, due to the newer EIP window
// technique.
@ -2660,6 +2658,11 @@ public: // for now...
#define Write_RMW_virtual_dword(val32) write_RMW_virtual_dword(val32)
#define Write_RMW_virtual_qword(val64) write_RMW_virtual_qword(val64)
BX_SMF void branch_near32(Bit32u new_eip) BX_CPP_AttrRegparmN(1);
#if BX_SUPPORT_X86_64
BX_SMF void branch_near64(bxInstruction_c *i) BX_CPP_AttrRegparmN(1);
#endif
BX_SMF void access_linear(bx_address address, unsigned length, unsigned pl,
unsigned rw, void *data) BX_CPP_AttrRegparmN(3);
BX_SMF Bit32u translate_linear(bx_address laddr,

View File

@ -1,5 +1,5 @@
/////////////////////////////////////////////////////////////////////////
// $Id: ctrl_xfer64.cc,v 1.32 2005-04-17 18:54:54 sshwarts Exp $
// $Id: ctrl_xfer64.cc,v 1.33 2005-04-17 21:51:59 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
// Copyright (C) 2001 MandrakeSoft S.A.
@ -235,21 +235,7 @@ done:
void BX_CPU_C::JMP_Jq(bxInstruction_c *i)
{
Bit64u new_RIP = RIP + (Bit32s) i->Id();
//invalidate_prefetch_q();
if (! i->os32L()) {
new_RIP &= 0xffff; // For 16-bit opSize, upper 48 bits of RIP are cleared.
}
else {
if (! IsCanonical(new_RIP)) {
BX_INFO(("JMP_Jq: canonical RIP violation"));
exception(BX_GP_EXCEPTION, 0, 0);
}
}
RIP = new_RIP;
branch_near64(i);
BX_INSTR_UCNEAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_JMP, RIP);
}
@ -283,19 +269,8 @@ void BX_CPU_C::JCC_Jq(bxInstruction_c *i)
}
if (condition) {
Bit64u new_RIP = RIP + (Bit32s) i->Id();
if (! i->os32L()) {
new_RIP &= 0xffff; // For 16-bit opSize, upper 48 bits of RIP are cleared.
}
else {
if (! IsCanonical(new_RIP)) {
BX_INFO(("JCC_Jq: canonical RIP violation"));
exception(BX_GP_EXCEPTION, 0, 0);
}
}
RIP = new_RIP;
branch_near64(i);
BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, RIP);
revalidate_prefetch_q();
}
#if BX_INSTRUMENTATION
else {
@ -327,8 +302,7 @@ void BX_CPU_C::JMP_Eq(bxInstruction_c *i)
BX_INSTR_UCNEAR_BRANCH(BX_CPU_ID, BX_INSTR_IS_JMP, RIP);
}
/* Far indirect jump */
/* Far indirect jump */
void BX_CPU_C::JMP64_Ep(bxInstruction_c *i)
{
Bit16u cs_raw;
@ -379,126 +353,82 @@ done:
void BX_CPU_C::JCXZ64_Jb(bxInstruction_c *i)
{
Bit64u temp_RCX;
if (i->as64L())
temp_RCX = RCX;
else
temp_RCX = ECX;
if (temp_RCX == 0) {
Bit64u new_RIP = RIP + (Bit32s) i->Id();
if (! i->os32L()) {
new_RIP &= 0xffff; // For 16-bit opSize, upper 48 bits of RIP are cleared.
if (i->as64L()) {
if ( RCX == 0 ) {
branch_near64(i);
BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, RIP);
return;
}
else {
if (! IsCanonical(new_RIP)) {
BX_INFO(("JCXZ64_Jb: canonical RIP violation"));
exception(BX_GP_EXCEPTION, 0, 0);
}
}
RIP = new_RIP;
BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, RIP);
revalidate_prefetch_q();
}
#if BX_INSTRUMENTATION
else {
BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
if ( ECX == 0 ) {
branch_near64(i);
BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, RIP);
return;
}
}
#endif
BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
}
void BX_CPU_C::LOOPNE64_Jb(bxInstruction_c *i)
{
Bit64u temp_RCX;
if (i->as64L())
temp_RCX = RCX;
else
temp_RCX = ECX;
if ( ((--temp_RCX)!=0) && (get_ZF()==0) ) {
Bit64u new_RIP = RIP + (Bit32s) i->Id();
if (! i->os32L()) {
new_RIP &= 0xffff; // For 16-bit opSize, upper 48 bits of RIP are cleared.
if (i->as64L()) {
if ( ((--RCX) != 0) && (get_ZF()==0) ) {
branch_near64(i);
BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, RIP);
return;
}
else {
if (! IsCanonical(new_RIP)) {
BX_INFO(("LOOPNE64_Jb: canonical RIP violation"));
exception(BX_GP_EXCEPTION, 0, 0);
}
}
RIP = new_RIP;
BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, RIP);
revalidate_prefetch_q();
}
#if BX_INSTRUMENTATION
else {
BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
if ( ((--ECX) != 0) && (get_ZF()==0) ) {
branch_near64(i);
BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, RIP);
return;
}
}
#endif
BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
}
void BX_CPU_C::LOOPE64_Jb(bxInstruction_c *i)
{
Bit64u temp_RCX;
if (i->as64L())
temp_RCX = RCX;
else
temp_RCX = ECX;
if ( ((--temp_RCX)!=0) && (get_ZF()) ) {
Bit64u new_RIP = RIP + (Bit32s) i->Id();
if (! i->os32L()) {
new_RIP &= 0xffff; // For 16-bit opSize, upper 48 bits of RIP are cleared.
if (i->as64L()) {
if ( ((--RCX)!=0) && (get_ZF()) ) {
branch_near64(i);
BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, RIP);
return;
}
else {
if (! IsCanonical(new_RIP)) {
BX_INFO(("LOOPE64_Jb: canonical RIP violation"));
exception(BX_GP_EXCEPTION, 0, 0);
}
}
RIP = new_RIP;
BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, RIP);
revalidate_prefetch_q();
}
#if BX_INSTRUMENTATION
else {
BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
if (((--ECX)!=0) && get_ZF()) {
branch_near64(i);
BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, RIP);
return;
}
}
#endif
BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
}
void BX_CPU_C::LOOP64_Jb(bxInstruction_c *i)
{
Bit64u temp_RCX;
if (i->as64L())
temp_RCX = RCX;
else
temp_RCX = ECX;
if ((--temp_RCX) != 0) {
Bit64u new_RIP = RIP + (Bit32s) i->Id();
if (! i->os32L()) {
new_RIP &= 0xffff; // For 16-bit opSize, upper 48 bits of RIP are cleared.
if (i->as64L()) {
if ((--RCX) != 0) {
branch_near64(i);
BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, RIP);
return;
}
else {
if (! IsCanonical(new_RIP)) {
BX_INFO(("JCC_Jq: canonical RIP violation"));
exception(BX_GP_EXCEPTION, 0, 0);
}
}
RIP = new_RIP;
BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, RIP);
revalidate_prefetch_q();
}
#if BX_INSTRUMENTATION
else {
BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
if ((--ECX) != 0) {
branch_near64(i);
BX_INSTR_CNEAR_BRANCH_TAKEN(BX_CPU_ID, RIP);
return;
}
}
#endif
BX_INSTR_CNEAR_BRANCH_NOT_TAKEN(BX_CPU_ID);
}
#endif /* if BX_SUPPORT_X86_64 */

View File

@ -1,5 +1,5 @@
/////////////////////////////////////////////////////////////////////////
// $Id: ctrl_xfer_pro.cc,v 1.36 2005-03-20 18:08:46 sshwarts Exp $
// $Id: ctrl_xfer_pro.cc,v 1.37 2005-04-17 21:51:59 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
// Copyright (C) 2001 MandrakeSoft S.A.
@ -1934,6 +1934,26 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::branch_near32(Bit32u new_EIP)
revalidate_prefetch_q();
}
#if BX_SUPPORT_X86_64
// Common tail for 64-bit-mode near branches (JMP/Jcc/JCXZ/LOOPxx):
// computes the branch target from instruction 'i' and installs it in RIP.
// Factored out by this commit to replace the duplicated inline copies in
// ctrl_xfer64.cc.
void BX_CPP_AttrRegparmN(1) BX_CPU_C::branch_near64(bxInstruction_c *i)
{
// Target = current RIP plus the sign-extended immediate displacement.
Bit64u new_RIP = RIP + (Bit32s) i->Id();

if (! i->os32L()) {
new_RIP &= 0xffff; // For 16-bit opSize, upper 48 bits of RIP are cleared.
}
else {
// 32/64-bit operand size: a non-canonical target faults with #GP(0)
// before RIP is updated.
if (! IsCanonical(new_RIP)) {
BX_ERROR(("branch_near64: canonical RIP violation"));
exception(BX_GP_EXCEPTION, 0, 0);
}
}

RIP = new_RIP;
// NOTE(review): revalidate_prefetch_q() is a no-op per the comment in
// cpu.h (EIP-window technique); kept for interface consistency.
revalidate_prefetch_q();
}
#endif
void BX_CPU_C::validate_seg_regs(void)
{
Bit8u cs_dpl = BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.dpl;