diff --git a/bochs/config.h.in b/bochs/config.h.in
index 05a30ed23..ed4362718 100644
--- a/bochs/config.h.in
+++ b/bochs/config.h.in
@@ -595,6 +595,32 @@ typedef
 #define BX_SupportICache 0
 #define BX_SupportHostAsms 0
 
+
+// ========================================================
+// These are some very temporary hacks I made to the 64-bit
+// support to help Peter with debugging etc.  They will be removed
+// soon and there is no configure option for them (on purpose).
+// By default, they are not compiled in.
+
+// Set this to 1 if you want to try my 64-bit hacks to get Linux booting.
+#define KPL64Hacks 0
+
+// I set this to 1 to bail in instructions which may not be honoring
+// the 64-bit widths of RIP/RSP.  If I trip a panic, then I clean
+// the function up and remove the panic.  You don't have to use
+// these.
+#if 0
+#define BailBigRSP(s) \
+  if ( (RIP > 0xffffffff) || \
+       (RSP > 0xffffffff) ) \
+    BX_PANIC((s ": bailing due to big RSP value, mode==%u", \
+              BX_CPU_THIS_PTR cpu_mode))
+#else
+#define BailBigRSP(s)
+#endif
+// ========================================================
+
+
 #if (BX_SUPPORT_MMX && BX_CPU_LEVEL < 5)
 #error With CPU level < 5, you must disable MMX support.
 #endif
diff --git a/bochs/cpu/cpu.h b/bochs/cpu/cpu.h
index 35168cc21..891b6f13e 100644
--- a/bochs/cpu/cpu.h
+++ b/bochs/cpu/cpu.h
@@ -1,5 +1,5 @@
 /////////////////////////////////////////////////////////////////////////
-// $Id: cpu.h,v 1.73 2002-09-22 22:22:16 kevinlawton Exp $
+// $Id: cpu.h,v 1.74 2002-09-24 00:44:55 kevinlawton Exp $
 /////////////////////////////////////////////////////////////////////////
 //
 // Copyright (C) 2001 MandrakeSoft S.A.
@@ -2981,4 +2981,5 @@ typedef enum _show_flags {
 // Can be used as LHS or RHS.
 #define RMAddr(i) (BX_CPU_THIS_PTR address_xlation.rm_addr)
 
+
 #endif  // #ifndef BX_CPU_H
diff --git a/bochs/cpu/ctrl_xfer16.cc b/bochs/cpu/ctrl_xfer16.cc
index c6be891e1..d5d04920b 100644
--- a/bochs/cpu/ctrl_xfer16.cc
+++ b/bochs/cpu/ctrl_xfer16.cc
@@ -1,5 +1,5 @@
 /////////////////////////////////////////////////////////////////////////
-// $Id: ctrl_xfer16.cc,v 1.14 2002-09-22 18:22:24 kevinlawton Exp $
+// $Id: ctrl_xfer16.cc,v 1.15 2002-09-24 00:44:55 kevinlawton Exp $
 /////////////////////////////////////////////////////////////////////////
 //
 // Copyright (C) 2001 MandrakeSoft S.A.
@@ -33,61 +33,33 @@
 
-
-
 void
 BX_CPU_C::RETnear16_Iw(bxInstruction_c *i)
 {
+BailBigRSP("RETnear16_Iw");
   Bit16u imm16;
-  Bit32u temp_ESP;
   Bit16u return_IP;
 
+  invalidate_prefetch_q();
+
 #if BX_DEBUGGER
   BX_CPU_THIS_PTR show_flag |= Flag_ret;
 #endif
 
-  if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.d_b) /* 32bit stack */
-    temp_ESP = ESP;
-  else
-    temp_ESP = SP;
-
   imm16 = i->Iw();
 
-  invalidate_prefetch_q();
-
-  if (protected_mode()) {
-    if ( !can_pop(2) ) {
-      BX_PANIC(("retnear_iw: can't pop IP"));
-      /* ??? #SS(0) -or #GP(0) */
-      }
-
-    access_linear(BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.base + temp_ESP + 0,
-      2, CPL==3, BX_READ, &return_IP);
-
-    if ( return_IP > BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled ) {
-      BX_PANIC(("retnear_iw: IP > limit"));
-      }
-
-    if ( !can_pop(2 + imm16) ) {
-      BX_PANIC(("retnear_iw: can't release bytes from stack"));
-      /* #GP(0) -or #SS(0) ??? */
-      }
-
-    EIP = return_IP;
-    if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.d_b) /* 32bit stack */
-      ESP += 2 + imm16; /* ??? should it be 2*imm16 ? */
-    else
-      SP += 2 + imm16;
-    }
-  else {
-    pop_16(&return_IP);
-    EIP = return_IP;
-    if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.d_b) /* 32bit stack */
-      ESP += imm16; /* ??? should it be 2*imm16 ? */
-    else
-      SP += imm16;
+  pop_16(&return_IP);
+  if (protected_mode()) {
+    if ( return_IP >
+         BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled ) {
+      BX_PANIC(("retnear_iw: IP > limit"));
      }
+    }
+  EIP = return_IP;
+  if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.d_b) /* 32bit stack */
+    ESP += imm16; /* ??? should it be 2*imm16 ? */
+  else
+    SP += imm16;
 
   BX_INSTR_UCNEAR_BRANCH(BX_INSTR_IS_RET, EIP);
 }
@@ -95,44 +67,23 @@ BX_CPU_C::RETnear16_Iw(bxInstruction_c *i)
 void
 BX_CPU_C::RETnear16(bxInstruction_c *i)
 {
-  Bit32u temp_ESP;
+BailBigRSP("RETnear16");
   Bit16u return_IP;
 
+  invalidate_prefetch_q();
+
 #if BX_DEBUGGER
   BX_CPU_THIS_PTR show_flag |= Flag_ret;
 #endif
 
-  invalidate_prefetch_q();
-
-  if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.d_b) /* 32bit stack */
-    temp_ESP = ESP;
-  else
-    temp_ESP = SP;
-
-
-  if (protected_mode()) {
-    if ( !can_pop(2) ) {
-      BX_PANIC(("retnear: can't pop IP"));
-      /* ??? #SS(0) -or #GP(0) */
-      }
-
-    access_linear(BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.base + temp_ESP + 0,
-      2, CPL==3, BX_READ, &return_IP);
-
-    if ( return_IP > BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled ) {
-      BX_PANIC(("retnear: IP > limit"));
-      }
-
-    EIP = return_IP;
-    if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.d_b) /* 32bit stack */
-      ESP += 2;
-    else
-      SP += 2;
-    }
-  else {
-    pop_16(&return_IP);
-    EIP = return_IP;
+  pop_16(&return_IP);
+  if (protected_mode()) {
+    if ( return_IP >
+         BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled ) {
+      BX_PANIC(("retnear: IP > limit"));
      }
+    }
+  EIP = return_IP;
 
   BX_INSTR_UCNEAR_BRANCH(BX_INSTR_IS_RET, EIP);
 }
@@ -140,9 +91,12 @@ BX_CPU_C::RETnear16(bxInstruction_c *i)
 void
 BX_CPU_C::RETfar16_Iw(bxInstruction_c *i)
 {
+BailBigRSP("RETfar16_Iw");
   Bit16s imm16;
   Bit16u ip, cs_raw;
 
+  invalidate_prefetch_q();
+
 #if BX_DEBUGGER
   BX_CPU_THIS_PTR show_flag |= Flag_ret;
 #endif
@@ -151,8 +105,6 @@ BX_CPU_C::RETfar16_Iw(bxInstruction_c *i)
 
   imm16 = i->Iw();
 
-  invalidate_prefetch_q();
-
 #if BX_CPU_LEVEL >= 2
   if (protected_mode()) {
     BX_CPU_THIS_PTR return_protected(i, imm16);
@@ -161,14 +113,14 @@ BX_CPU_C::RETfar16_Iw(bxInstruction_c *i)
 #endif
 
-    pop_16(&ip);
-    pop_16(&cs_raw);
-    EIP = (Bit32u) ip;
-    load_seg_reg(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS], cs_raw);
-    if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.d_b)
-      ESP += imm16;
-    else
-      SP += imm16;
+  pop_16(&ip);
+  pop_16(&cs_raw);
+  EIP = (Bit32u) ip;
+  load_seg_reg(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS], cs_raw);
+  if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.d_b)
+    ESP += imm16;
+  else
+    SP += imm16;
 
 done:
   BX_INSTR_FAR_BRANCH(BX_INSTR_IS_RET,
@@ -178,14 +130,15 @@ done:
 void
 BX_CPU_C::RETfar16(bxInstruction_c *i)
 {
+BailBigRSP("RETfar16");
   Bit16u ip, cs_raw;
 
+  invalidate_prefetch_q();
+
 #if BX_DEBUGGER
   BX_CPU_THIS_PTR show_flag |= Flag_ret;
 #endif
 
-  invalidate_prefetch_q();
-
 #if BX_CPU_LEVEL >= 2
   if ( protected_mode() ) {
     BX_CPU_THIS_PTR return_protected(i, 0);
@@ -193,10 +146,10 @@ BX_CPU_C::RETfar16(bxInstruction_c *i)
     }
 #endif
 
-    pop_16(&ip);
-    pop_16(&cs_raw);
-    EIP = (Bit32u) ip;
-    load_seg_reg(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS], cs_raw);
+  pop_16(&ip);
+  pop_16(&cs_raw);
+  EIP = (Bit32u) ip;
+  load_seg_reg(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS], cs_raw);
 
 done:
   BX_INSTR_FAR_BRANCH(BX_INSTR_IS_RET,
@@ -208,14 +161,15 @@ done:
 void
 BX_CPU_C::CALL_Aw(bxInstruction_c *i)
 {
+BailBigRSP("CALL_Aw");
   Bit32u new_EIP;
 
+  invalidate_prefetch_q();
+
 #if BX_DEBUGGER
   BX_CPU_THIS_PTR show_flag |= Flag_call;
 #endif
 
-  invalidate_prefetch_q();
-
   new_EIP = EIP + (Bit32s) i->Id();
   new_EIP &= 0x0000ffff;
 #if BX_CPU_LEVEL >= 2
@@ -228,7 +182,6 @@ BX_CPU_C::CALL_Aw(bxInstruction_c *i)
 
   /* push 16 bit EA of next instruction */
   push_16(IP);
-
   EIP = new_EIP;
 
   BX_INSTR_UCNEAR_BRANCH(BX_INSTR_IS_CALL, EIP);
@@ -237,16 +190,18 @@ BX_CPU_C::CALL_Aw(bxInstruction_c *i)
 void
 BX_CPU_C::CALL16_Ap(bxInstruction_c *i)
 {
+BailBigRSP("CALL16_Ap");
   Bit16u cs_raw;
   Bit16u disp16;
 
+  invalidate_prefetch_q();
+
 #if BX_DEBUGGER
   BX_CPU_THIS_PTR show_flag |= Flag_call;
 #endif
 
   disp16 = i->Iw();
   cs_raw = i->Iw2();
-  invalidate_prefetch_q();
 
 #if BX_CPU_LEVEL >= 2
   if (protected_mode()) {
@@ -254,6 +209,7 @@ BX_CPU_C::CALL16_Ap(bxInstruction_c *i)
     goto done;
     }
 #endif
+
   push_16(BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value);
   push_16((Bit16u) EIP);
   EIP = (Bit32u) disp16;
@@ -267,44 +223,34 @@ done:
 void
 BX_CPU_C::CALL_Ew(bxInstruction_c *i)
 {
-  Bit32u temp_ESP;
+BailBigRSP("CALL_Ew");
   Bit16u op1_16;
 
+  invalidate_prefetch_q();
+
 #if BX_DEBUGGER
   BX_CPU_THIS_PTR show_flag |= Flag_call;
 #endif
 
-  if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.d_b)
-    temp_ESP = ESP;
-  else
-    temp_ESP = SP;
-
-
-  /* op1_16 is a register or memory reference */
-  if (i->modC0()) {
-    op1_16 = BX_READ_16BIT_REG(i->rm());
-    }
-  else {
-    /* pointer, segment address pair */
-    read_virtual_word(i->seg(), RMAddr(i), &op1_16);
-    }
-  invalidate_prefetch_q();
+  if (i->modC0()) {
+    op1_16 = BX_READ_16BIT_REG(i->rm());
+    }
+  else {
+    read_virtual_word(i->seg(), RMAddr(i), &op1_16);
+    }
 
 #if BX_CPU_LEVEL >= 2
-  if (protected_mode()) {
-    if (op1_16 > BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled) {
-      BX_PANIC(("call_ev: IP out of CS limits!"));
-      exception(BX_GP_EXCEPTION, 0, 0);
-      }
-    if ( !can_push(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache, temp_ESP, 2) ) {
-      BX_PANIC(("call_ev: can't push IP"));
-      }
+  if (protected_mode()) {
+    if (op1_16 >
+        BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled) {
+      BX_PANIC(("call_ev: IP out of CS limits!"));
+      exception(BX_GP_EXCEPTION, 0, 0);
      }
+    }
 #endif
 
-  push_16(IP);
-
-  EIP = op1_16;
+  push_16(IP);
+  EIP = op1_16;
 
   BX_INSTR_UCNEAR_BRANCH(BX_INSTR_IS_CALL, EIP);
 }
@@ -312,33 +258,33 @@ BX_CPU_C::CALL_Ew(bxInstruction_c *i)
 void
 BX_CPU_C::CALL16_Ep(bxInstruction_c *i)
 {
+BailBigRSP("CALL_16_Ep");
   Bit16u cs_raw;
   Bit16u op1_16;
 
+  invalidate_prefetch_q();
+
 #if BX_DEBUGGER
   BX_CPU_THIS_PTR show_flag |= Flag_call;
 #endif
 
-  /* op1_16 is a register or memory reference */
-  if (i->modC0()) {
-    BX_PANIC(("CALL_Ep: op1 is a register"));
-    }
+  if (i->modC0()) {
+    BX_PANIC(("CALL_Ep: op1 is a register"));
+    }
 
-  /* pointer, segment address pair */
-  read_virtual_word(i->seg(), RMAddr(i), &op1_16);
-  read_virtual_word(i->seg(), RMAddr(i)+2, &cs_raw);
-  invalidate_prefetch_q();
+  read_virtual_word(i->seg(), RMAddr(i), &op1_16);
+  read_virtual_word(i->seg(), RMAddr(i)+2, &cs_raw);
 
-  if ( protected_mode() ) {
-    BX_CPU_THIS_PTR call_protected(i, cs_raw, op1_16);
-    goto done;
-    }
+  if ( protected_mode() ) {
+    BX_CPU_THIS_PTR call_protected(i, cs_raw, op1_16);
+    goto done;
+    }
 
-  push_16(BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value);
-  push_16(IP);
+  push_16(BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value);
+  push_16(IP);
 
-  EIP = op1_16;
-
-  load_seg_reg(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS], cs_raw);
+  EIP = op1_16;
+  load_seg_reg(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS], cs_raw);
 
 done:
   BX_INSTR_FAR_BRANCH(BX_INSTR_IS_CALL,
@@ -349,9 +295,9 @@ done:
 void
 BX_CPU_C::JMP_Jw(bxInstruction_c *i)
 {
+BailBigRSP("JMP_Jw");
   Bit32u new_EIP;
 
-
   invalidate_prefetch_q();
 
   new_EIP = EIP + (Bit32s) i->Id();
@@ -373,6 +319,7 @@ BX_CPU_C::JMP_Jw(bxInstruction_c *i)
 void
 BX_CPU_C::JCC_Jw(bxInstruction_c *i)
 {
+BailBigRSP("JCC_Jw");
   Boolean condition;
 
   switch (i->b1() & 0x0f) {
@@ -428,6 +375,7 @@ BX_CPU_C::JCC_Jw(bxInstruction_c *i)
 void
 BX_CPU_C::JZ_Jw(bxInstruction_c *i)
 {
+BailBigRSP("JZ_Jw");
   if (get_ZF()) {
     Bit32u new_EIP;
 
@@ -456,6 +404,7 @@ BX_CPU_C::JZ_Jw(bxInstruction_c *i)
 void
 BX_CPU_C::JNZ_Jw(bxInstruction_c *i)
 {
+BailBigRSP("JNZ_Jw");
   if (!get_ZF()) {
     Bit32u new_EIP;
 
@@ -485,25 +434,25 @@ BX_CPU_C::JNZ_Jw(bxInstruction_c *i)
 void
 BX_CPU_C::JMP_Ew(bxInstruction_c *i)
 {
+BailBigRSP("JMP_Ew");
   Bit32u new_EIP;
   Bit16u op1_16;
 
+  invalidate_prefetch_q();
 
-  /* op1_16 is a register or memory reference */
-  if (i->modC0()) {
-    op1_16 = BX_READ_16BIT_REG(i->rm());
-    }
-  else {
-    /* pointer, segment address pair */
-    read_virtual_word(i->seg(), RMAddr(i), &op1_16);
-    }
+  if (i->modC0()) {
+    op1_16 = BX_READ_16BIT_REG(i->rm());
+    }
+  else {
+    read_virtual_word(i->seg(), RMAddr(i), &op1_16);
+    }
 
-  invalidate_prefetch_q();
-  new_EIP = op1_16;
+  new_EIP = op1_16;
 
 #if BX_CPU_LEVEL >= 2
   if (protected_mode()) {
-    if (new_EIP > BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled) {
+    if (new_EIP >
+        BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled) {
       BX_PANIC(("jmp_ev: IP out of CS limits!"));
       exception(BX_GP_EXCEPTION, 0, 0);
       }
@@ -520,29 +469,29 @@ BX_CPU_C::JMP_Ew(bxInstruction_c *i)
 void
 BX_CPU_C::JMP16_Ep(bxInstruction_c *i)
 {
+BailBigRSP("JMP16_Ep");
   Bit16u cs_raw;
   Bit16u op1_16;
 
-  /* op1_16 is a register or memory reference */
-  if (i->modC0()) {
-    /* far indirect must specify a memory address */
-    BX_PANIC(("JMP_Ep(): op1 is a register"));
-    }
+  invalidate_prefetch_q();
 
-  /* pointer, segment address pair */
-  read_virtual_word(i->seg(), RMAddr(i), &op1_16);
-  read_virtual_word(i->seg(), RMAddr(i)+2, &cs_raw);
-  invalidate_prefetch_q();
+  if (i->modC0()) {
+    /* far indirect must specify a memory address */
+    BX_PANIC(("JMP_Ep(): op1 is a register"));
+    }
+
+  read_virtual_word(i->seg(), RMAddr(i), &op1_16);
+  read_virtual_word(i->seg(), RMAddr(i)+2, &cs_raw);
 
 #if BX_CPU_LEVEL >= 2
-  if ( protected_mode() ) {
-    BX_CPU_THIS_PTR jump_protected(i, cs_raw, op1_16);
-    goto done;
-    }
+  if ( protected_mode() ) {
+    BX_CPU_THIS_PTR jump_protected(i, cs_raw, op1_16);
+    goto done;
+    }
 #endif
 
-  EIP = op1_16;
-  load_seg_reg(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS], cs_raw);
+  EIP = op1_16;
+  load_seg_reg(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS], cs_raw);
 
 done:
   BX_INSTR_FAR_BRANCH(BX_INSTR_IS_JMP,
@@ -552,15 +501,16 @@ done:
 void
 BX_CPU_C::IRET16(bxInstruction_c *i)
 {
+BailBigRSP("IRET16");
   Bit16u ip, cs_raw, flags;
 
+  invalidate_prefetch_q();
+
 #if BX_DEBUGGER
   BX_CPU_THIS_PTR show_flag |= Flag_iret;
   BX_CPU_THIS_PTR show_eip = EIP;
 #endif
 
-  invalidate_prefetch_q();
-
   if (v8086_mode()) {
     // IOPL check in stack_return_from_v86()
     stack_return_from_v86(i);
diff --git a/bochs/cpu/ctrl_xfer32.cc b/bochs/cpu/ctrl_xfer32.cc
index 851efb1cc..ea4d0d67d 100644
--- a/bochs/cpu/ctrl_xfer32.cc
+++ b/bochs/cpu/ctrl_xfer32.cc
@@ -1,5 +1,5 @@
 /////////////////////////////////////////////////////////////////////////
-// $Id: ctrl_xfer32.cc,v 1.17 2002-09-22 18:22:24 kevinlawton Exp $
+// $Id: ctrl_xfer32.cc,v 1.18 2002-09-24 00:44:55 kevinlawton Exp $
 /////////////////////////////////////////////////////////////////////////
 //
 // Copyright (C) 2001 MandrakeSoft S.A.
@@ -38,10 +38,13 @@
 void
 BX_CPU_C::RETnear32_Iw(bxInstruction_c *i)
 {
+BailBigRSP("RETnear32_Iw");
   Bit16u imm16;
   Bit32u temp_ESP;
   Bit32u return_EIP;
 
+  invalidate_prefetch_q();
+
 #if BX_DEBUGGER
   BX_CPU_THIS_PTR show_flag |= Flag_ret;
 #endif
@@ -53,9 +56,6 @@ BX_CPU_C::RETnear32_Iw(bxInstruction_c *i)
 
   imm16 = i->Iw();
 
-  invalidate_prefetch_q();
-
-
   if (protected_mode()) {
     if ( !can_pop(4) ) {
       BX_PANIC(("retnear_iw: can't pop EIP"));
@@ -98,15 +98,16 @@ BX_CPU_C::RETnear32_Iw(bxInstruction_c *i)
 void
 BX_CPU_C::RETnear32(bxInstruction_c *i)
 {
+BailBigRSP("RETnear32");
   Bit32u temp_ESP;
   Bit32u return_EIP;
 
+  invalidate_prefetch_q();
+
 #if BX_DEBUGGER
   BX_CPU_THIS_PTR show_flag |= Flag_ret;
 #endif
 
-  invalidate_prefetch_q();
-
   if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.d_b) /* 32bit stack */
     temp_ESP = ESP;
   else
@@ -143,9 +144,12 @@ BX_CPU_C::RETnear32(bxInstruction_c *i)
 void
 BX_CPU_C::RETfar32_Iw(bxInstruction_c *i)
 {
+BailBigRSP("RETfar32_Iw");
   Bit32u eip, ecs_raw;
   Bit16s imm16;
 
+  invalidate_prefetch_q();
+
 #if BX_DEBUGGER
   BX_CPU_THIS_PTR show_flag |= Flag_ret;
 #endif
@@ -153,8 +157,6 @@ BX_CPU_C::RETfar32_Iw(bxInstruction_c *i)
 
   imm16 = i->Iw();
 
-  invalidate_prefetch_q();
-
 #if BX_CPU_LEVEL >= 2
   if (protected_mode()) {
     BX_CPU_THIS_PTR return_protected(i, imm16);
@@ -180,14 +182,15 @@ done:
 void
 BX_CPU_C::RETfar32(bxInstruction_c *i)
 {
+BailBigRSP("RETfar32");
   Bit32u eip, ecs_raw;
 
+  invalidate_prefetch_q();
+
 #if BX_DEBUGGER
   BX_CPU_THIS_PTR show_flag |= Flag_ret;
 #endif
 
-  invalidate_prefetch_q();
-
 #if BX_CPU_LEVEL >= 2
   if ( protected_mode() ) {
     BX_CPU_THIS_PTR return_protected(i, 0);
@@ -211,15 +214,17 @@ done:
 void
 BX_CPU_C::CALL_Ad(bxInstruction_c *i)
 {
+BailBigRSP("CALL_Ad");
   Bit32u new_EIP;
   Bit32s disp32;
 
+  invalidate_prefetch_q();
+
 #if BX_DEBUGGER
   BX_CPU_THIS_PTR show_flag |= Flag_call;
 #endif
 
   disp32 = i->Id();
-  invalidate_prefetch_q();
 
   new_EIP = EIP + disp32;
 
@@ -240,16 +245,18 @@ BX_CPU_C::CALL_Ad(bxInstruction_c *i)
 void
 BX_CPU_C::CALL32_Ap(bxInstruction_c *i)
 {
+BailBigRSP("CALL32_Ap");
   Bit16u cs_raw;
   Bit32u disp32;
 
+  invalidate_prefetch_q();
+
 #if BX_DEBUGGER
   BX_CPU_THIS_PTR show_flag |= Flag_call;
 #endif
 
   disp32 = i->Id();
   cs_raw = i->Iw2();
-  invalidate_prefetch_q();
 
   if (protected_mode()) {
     BX_CPU_THIS_PTR call_protected(i, cs_raw, disp32);
@@ -268,9 +275,12 @@ done:
 void
 BX_CPU_C::CALL_Ed(bxInstruction_c *i)
 {
+BailBigRSP("CALL_Ed");
   Bit32u temp_ESP;
   Bit32u op1_32;
 
+  invalidate_prefetch_q();
+
 #if BX_DEBUGGER
   BX_CPU_THIS_PTR show_flag |= Flag_call;
 #endif
@@ -281,28 +291,23 @@ BX_CPU_C::CALL_Ed(bxInstruction_c *i)
     temp_ESP = SP;
 
-  /* op1_32 is a register or memory reference */
-  if (i->modC0()) {
-    op1_32 = BX_READ_32BIT_REG(i->rm());
-    }
-  else {
-    read_virtual_dword(i->seg(), RMAddr(i), &op1_32);
-    }
-  invalidate_prefetch_q();
+  if (i->modC0()) {
+    op1_32 = BX_READ_32BIT_REG(i->rm());
+    }
+  else {
+    read_virtual_dword(i->seg(), RMAddr(i), &op1_32);
+    }
 
-  if (protected_mode()) {
-    if (op1_32 > BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled) {
-      BX_DEBUG(("call_ev: EIP out of CS limits! at %s:%d"));
at %s:%d")); - exception(BX_GP_EXCEPTION, 0, 0); - } - if ( !can_push(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache, temp_ESP, 4) ) { - BX_PANIC(("call_ev: can't push EIP")); - } + if (protected_mode()) { + if (op1_32 > + BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled) { + BX_DEBUG(("call_ev: EIP out of CS limits! at %s:%d")); + exception(BX_GP_EXCEPTION, 0, 0); } + } - push_32(EIP); - - EIP = op1_32; + push_32(EIP); + EIP = op1_32; BX_INSTR_UCNEAR_BRANCH(BX_INSTR_IS_CALL, EIP); } @@ -310,9 +315,12 @@ BX_CPU_C::CALL_Ed(bxInstruction_c *i) void BX_CPU_C::CALL32_Ep(bxInstruction_c *i) { +BailBigRSP("CALL32_Ep"); Bit16u cs_raw; Bit32u op1_32; + invalidate_prefetch_q(); + #if BX_DEBUGGER BX_CPU_THIS_PTR show_flag |= Flag_call; #endif @@ -325,7 +333,6 @@ BX_CPU_C::CALL32_Ep(bxInstruction_c *i) /* pointer, segment address pair */ read_virtual_dword(i->seg(), RMAddr(i), &op1_32); read_virtual_word(i->seg(), RMAddr(i)+4, &cs_raw); - invalidate_prefetch_q(); if ( protected_mode() ) { BX_CPU_THIS_PTR call_protected(i, cs_raw, op1_32); @@ -347,9 +354,10 @@ done: void BX_CPU_C::JMP_Jd(bxInstruction_c *i) { +BailBigRSP("JMP_Jd"); Bit32u new_EIP; - invalidate_prefetch_q(); + invalidate_prefetch_q(); new_EIP = EIP + (Bit32s) i->Id(); @@ -397,19 +405,31 @@ BX_CPU_C::JCC_Jd(bxInstruction_c *i) } if (condition) { - Bit32u new_EIP; - - new_EIP = EIP + (Bit32s) i->Id(); -#if BX_CPU_LEVEL >= 2 - if (protected_mode()) { - if ( new_EIP > - BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled ) { - BX_PANIC(("jo_routine: offset outside of CS limits")); - exception(BX_GP_EXCEPTION, 0, 0); - } +#if BX_SUPPORT_X86_64 +#if KPL64Hacks + if (BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64) { + RIP += (Bit32s) i->Id(); } + else #endif - EIP = new_EIP; +#endif + { +BailBigRSP("JCC_Jd"); + Bit32u new_EIP; + + new_EIP = EIP + (Bit32s) i->Id(); +#if BX_CPU_LEVEL >= 2 + if (protected_mode()) { + if ( new_EIP > + BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled ) + { + BX_PANIC(("jo_routine: offset outside of CS limits")); + exception(BX_GP_EXCEPTION, 0, 0); + } + } +#endif + EIP = new_EIP; + } BX_INSTR_CNEAR_BRANCH_TAKEN(new_EIP); revalidate_prefetch_q(); } @@ -423,6 +443,7 @@ BX_CPU_C::JCC_Jd(bxInstruction_c *i) void BX_CPU_C::JZ_Jd(bxInstruction_c *i) { +BailBigRSP("JZ_Jd"); if (get_ZF()) { Bit32u new_EIP; @@ -450,6 +471,7 @@ BX_CPU_C::JZ_Jd(bxInstruction_c *i) void BX_CPU_C::JNZ_Jd(bxInstruction_c *i) { +BailBigRSP("JNZ_Jd"); if (!get_ZF()) { Bit32u new_EIP; @@ -477,6 +499,7 @@ BX_CPU_C::JNZ_Jd(bxInstruction_c *i) void BX_CPU_C::JMP_Ap(bxInstruction_c *i) { +BailBigRSP("JMP_Ap"); Bit32u disp32; Bit16u cs_raw; @@ -511,9 +534,12 @@ done: void BX_CPU_C::JMP_Ed(bxInstruction_c *i) { +BailBigRSP("JMP_Ed"); Bit32u new_EIP; Bit32u op1_32; + invalidate_prefetch_q(); + /* op1_32 is a register or memory reference */ if (i->modC0()) { op1_32 = BX_READ_32BIT_REG(i->rm()); @@ -523,7 +549,6 @@ BX_CPU_C::JMP_Ed(bxInstruction_c *i) read_virtual_dword(i->seg(), RMAddr(i), &op1_32); } - invalidate_prefetch_q(); new_EIP = op1_32; #if BX_CPU_LEVEL >= 2 @@ -545,9 +570,12 @@ BX_CPU_C::JMP_Ed(bxInstruction_c *i) void BX_CPU_C::JMP32_Ep(bxInstruction_c *i) { +BailBigRSP("JMP32_Ep"); Bit16u cs_raw; Bit32u op1_32; + invalidate_prefetch_q(); + /* op1_32 is a register or memory reference */ if (i->modC0()) { /* far indirect must specify a memory address */ @@ -557,7 +585,6 @@ BX_CPU_C::JMP32_Ep(bxInstruction_c *i) /* pointer, segment address pair */ read_virtual_dword(i->seg(), RMAddr(i), &op1_32); 
   read_virtual_word(i->seg(), RMAddr(i)+4, &cs_raw);
-  invalidate_prefetch_q();
 
   if ( protected_mode() ) {
     BX_CPU_THIS_PTR jump_protected(i, cs_raw, op1_32);
@@ -575,15 +602,16 @@ done:
 void
 BX_CPU_C::IRET32(bxInstruction_c *i)
 {
+BailBigRSP("IRET32");
   Bit32u eip, ecs_raw, eflags;
 
+  invalidate_prefetch_q();
+
 #if BX_DEBUGGER
   BX_CPU_THIS_PTR show_flag |= Flag_iret;
   BX_CPU_THIS_PTR show_eip = EIP;
 #endif
 
-  invalidate_prefetch_q();
-
   if (v8086_mode()) {
     // IOPL check in stack_return_from_v86()
     stack_return_from_v86(i);
diff --git a/bochs/cpu/ctrl_xfer64.cc b/bochs/cpu/ctrl_xfer64.cc
index 025cc39ab..17e20301b 100644
--- a/bochs/cpu/ctrl_xfer64.cc
+++ b/bochs/cpu/ctrl_xfer64.cc
@@ -1,5 +1,5 @@
 /////////////////////////////////////////////////////////////////////////
-// $Id: ctrl_xfer64.cc,v 1.10 2002-09-22 18:22:24 kevinlawton Exp $
+// $Id: ctrl_xfer64.cc,v 1.11 2002-09-24 00:44:55 kevinlawton Exp $
 /////////////////////////////////////////////////////////////////////////
 //
 // Copyright (C) 2001 MandrakeSoft S.A.
@@ -42,6 +42,8 @@ BX_CPU_C::RETnear64_Iw(bxInstruction_c *i)
   Bit64u temp_RSP;
   Bit64u return_RIP;
 
+  invalidate_prefetch_q();
+
 #if BX_DEBUGGER
   BX_CPU_THIS_PTR show_flag |= Flag_ret;
 #endif
@@ -50,9 +52,6 @@ BX_CPU_C::RETnear64_Iw(bxInstruction_c *i)
 
   imm16 = i->Iw();
 
-  invalidate_prefetch_q();
-
-
   //if ( !can_pop(8) ) {
   //  BX_PANIC(("retnear_iw: can't pop RIP"));
   //  /* ??? #SS(0) -or #GP(0) */
@@ -79,12 +78,12 @@ BX_CPU_C::RETnear64(bxInstruction_c *i)
   Bit64u temp_RSP;
   Bit64u return_RIP;
 
+  invalidate_prefetch_q();
+
 #if BX_DEBUGGER
   BX_CPU_THIS_PTR show_flag |= Flag_ret;
 #endif
 
-  invalidate_prefetch_q();
-
   temp_RSP = RSP;
 
   //if ( !can_pop(8) ) {
@@ -107,6 +106,8 @@ BX_CPU_C::RETfar64_Iw(bxInstruction_c *i)
   Bit64u rip, rcs_raw;
   Bit16s imm16;
 
+  invalidate_prefetch_q();
+
 #if BX_DEBUGGER
   BX_CPU_THIS_PTR show_flag |= Flag_ret;
 #endif
@@ -114,8 +115,6 @@ BX_CPU_C::RETfar64_Iw(bxInstruction_c *i)
 
   imm16 = i->Iw();
 
-  invalidate_prefetch_q();
-
 #if BX_CPU_LEVEL >= 2
   if (protected_mode()) {
     BX_CPU_THIS_PTR return_protected(i, imm16);
@@ -139,12 +138,12 @@ BX_CPU_C::RETfar64(bxInstruction_c *i)
 {
   Bit64u rip, rcs_raw;
 
+  invalidate_prefetch_q();
+
 #if BX_DEBUGGER
   BX_CPU_THIS_PTR show_flag |= Flag_ret;
 #endif
 
-  invalidate_prefetch_q();
-
 #if BX_CPU_LEVEL >= 2
   if ( protected_mode() ) {
     BX_CPU_THIS_PTR return_protected(i, 0);
@@ -171,12 +170,13 @@ BX_CPU_C::CALL_Aq(bxInstruction_c *i)
   Bit64u new_RIP;
   Bit32s disp32;
 
+  invalidate_prefetch_q();
+
 #if BX_DEBUGGER
   BX_CPU_THIS_PTR show_flag |= Flag_call;
 #endif
 
   disp32 = i->Id();
-  invalidate_prefetch_q();
 
   new_RIP = RIP + disp32;
 
@@ -193,13 +193,14 @@ BX_CPU_C::CALL64_Ap(bxInstruction_c *i)
   Bit16u cs_raw;
   Bit32u disp32;
 
+  invalidate_prefetch_q();
+
 #if BX_DEBUGGER
   BX_CPU_THIS_PTR show_flag |= Flag_call;
 #endif
 
   disp32 = i->Id();
   cs_raw = i->Iw2();
-  invalidate_prefetch_q();
 
   if (protected_mode()) {
     BX_CPU_THIS_PTR call_protected(i, cs_raw, disp32);
@@ -221,29 +222,23 @@ BX_CPU_C::CALL_Eq(bxInstruction_c *i)
   Bit64u temp_RSP;
   Bit64u op1_64;
 
+  invalidate_prefetch_q();
+
 #if BX_DEBUGGER
   BX_CPU_THIS_PTR show_flag |= Flag_call;
 #endif
 
   temp_RSP = RSP;
 
+  if (i->modC0()) {
+    op1_64 = BX_READ_64BIT_REG(i->rm());
+    }
+  else {
+    read_virtual_qword(i->seg(), RMAddr(i), &op1_64);
+    }
 
-  /* op1_64 is a register or memory reference */
-  if (i->modC0()) {
-    op1_64 = BX_READ_64BIT_REG(i->rm());
-    }
-  else {
-    read_virtual_qword(i->seg(), RMAddr(i), &op1_64);
-    }
-  invalidate_prefetch_q();
-
-  if ( !can_push(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache, temp_RSP, 8) ) {
BX_PANIC(("call_ev: can't push RIP")); - } - - push_64(BX_CPU_THIS_PTR rip); - - RIP = op1_64; + push_64(BX_CPU_THIS_PTR rip); + RIP = op1_64; BX_INSTR_UCNEAR_BRANCH(BX_INSTR_IS_CALL, BX_CPU_THIS_PTR rip); } @@ -254,6 +249,8 @@ BX_CPU_C::CALL64_Ep(bxInstruction_c *i) Bit16u cs_raw; Bit64u op1_64; + invalidate_prefetch_q(); + #if BX_DEBUGGER BX_CPU_THIS_PTR show_flag |= Flag_call; #endif @@ -266,7 +263,6 @@ BX_CPU_C::CALL64_Ep(bxInstruction_c *i) /* pointer, segment address pair */ read_virtual_qword(i->seg(), RMAddr(i), &op1_64); read_virtual_word(i->seg(), RMAddr(i)+8, &cs_raw); - invalidate_prefetch_q(); if ( protected_mode() ) { BX_CPU_THIS_PTR call_protected(i, cs_raw, op1_64); @@ -381,17 +377,15 @@ BX_CPU_C::JMP_Eq(bxInstruction_c *i) Bit64u new_RIP; Bit64u op1_64; - /* op1_64 is a register or memory reference */ + invalidate_prefetch_q(); + if (i->modC0()) { op1_64 = BX_READ_64BIT_REG(i->rm()); } else { - /* pointer, segment address pair */ read_virtual_qword(i->seg(), RMAddr(i), &op1_64); } - invalidate_prefetch_q(); - RIP = op1_64; BX_INSTR_UCNEAR_BRANCH(BX_INSTR_IS_JMP, new_RIP); @@ -405,16 +399,14 @@ BX_CPU_C::JMP64_Ep(bxInstruction_c *i) Bit16u cs_raw; Bit64u op1_64; - /* op1_32 is a register or memory reference */ + invalidate_prefetch_q(); + if (i->modC0()) { - /* far indirect must specify a memory address */ BX_PANIC(("JMP_Ep(): op1 is a register")); } - /* pointer, segment address pair */ read_virtual_qword(i->seg(), RMAddr(i), &op1_64); read_virtual_word(i->seg(), RMAddr(i)+8, &cs_raw); - invalidate_prefetch_q(); if ( protected_mode() ) { BX_CPU_THIS_PTR jump_protected(i, cs_raw, op1_64); @@ -434,13 +426,13 @@ BX_CPU_C::IRET64(bxInstruction_c *i) { Bit32u rip, ecs_raw, eflags; + invalidate_prefetch_q(); + #if BX_DEBUGGER BX_CPU_THIS_PTR show_flag |= Flag_iret; BX_CPU_THIS_PTR show_eip = BX_CPU_THIS_PTR rip; #endif - invalidate_prefetch_q(); - #if BX_CPU_LEVEL >= 2 if (BX_CPU_THIS_PTR cr0.pe) { iret_protected(i); diff --git a/bochs/cpu/ctrl_xfer8.cc b/bochs/cpu/ctrl_xfer8.cc index cfe9adb56..4ee0c101d 100644 --- a/bochs/cpu/ctrl_xfer8.cc +++ b/bochs/cpu/ctrl_xfer8.cc @@ -1,5 +1,5 @@ ///////////////////////////////////////////////////////////////////////// -// $Id: ctrl_xfer8.cc,v 1.10 2002-09-18 05:36:47 kevinlawton Exp $ +// $Id: ctrl_xfer8.cc,v 1.11 2002-09-24 00:44:55 kevinlawton Exp $ ///////////////////////////////////////////////////////////////////////// // // Copyright (C) 2001 MandrakeSoft S.A. 
@@ -42,7 +42,7 @@
 void
 BX_CPU_C::JCXZ_Jb(bxInstruction_c *i)
 {
-
+BailBigRSP("JCXZ_Jb");
   if (i->as64L()) {
     if ( RCX == 0 ) {
       RIP += (Bit32s) i->Id();
@@ -94,6 +94,7 @@ BX_CPU_C::JCXZ_Jb(bxInstruction_c *i)
 void
 BX_CPU_C::LOOPNE_Jb(bxInstruction_c *i)
 {
+BailBigRSP("loopne_jb");
   if (i->as64L()) {
 
     if ( ((--RCX)!=0) && (get_ZF()==0) ) {
@@ -150,6 +151,7 @@ BX_CPU_C::LOOPNE_Jb(bxInstruction_c *i)
 void
 BX_CPU_C::LOOPE_Jb(bxInstruction_c *i)
 {
+BailBigRSP("loope_jb");
   if (i->as64L()) {
 
     if ( ((--RCX)!=0) && (get_ZF()) ) {
@@ -206,6 +208,7 @@ BX_CPU_C::LOOPE_Jb(bxInstruction_c *i)
 void
 BX_CPU_C::LOOP_Jb(bxInstruction_c *i)
 {
+BailBigRSP("loop_jb");
   if (i->as64L()) {
 
     if ( ((--RCX)!=0) ) {
diff --git a/bochs/cpu/ctrl_xfer_pro.cc b/bochs/cpu/ctrl_xfer_pro.cc
index a617b0469..aee0b3026 100644
--- a/bochs/cpu/ctrl_xfer_pro.cc
+++ b/bochs/cpu/ctrl_xfer_pro.cc
@@ -1,5 +1,5 @@
 /////////////////////////////////////////////////////////////////////////
-// $Id: ctrl_xfer_pro.cc,v 1.16 2002-09-18 05:36:47 kevinlawton Exp $
+// $Id: ctrl_xfer_pro.cc,v 1.17 2002-09-24 00:44:55 kevinlawton Exp $
 /////////////////////////////////////////////////////////////////////////
 //
 // Copyright (C) 2001 MandrakeSoft S.A.
@@ -1607,6 +1607,19 @@ BX_CPU_C::iret_protected(bxInstruction_c *i)
   //      return;
   //      }
 
+
+#if KPL64Hacks
+unsigned ssIsNull = 0;
+if ( (raw_ss_selector & 0xfffc) == 0 ) {
+  BX_INFO(("iret: SS NULL OK in LM"));
+  parse_selector(0,&ss_selector);
+  parse_descriptor(0,0,&ss_descriptor);
+  load_ss_null(&ss_selector, &ss_descriptor, cs_descriptor.dpl);
+  ssIsNull = 1;
+  goto afterSSChecks;
+  }
+#endif
+
   parse_selector(raw_ss_selector, &ss_selector);
 
   /* selector RPL must = RPL of return CS selector,
@@ -1650,6 +1663,9 @@ BX_CPU_C::iret_protected(bxInstruction_c *i)
     return;
     }
 
+#if KPL64Hacks
+afterSSChecks:
+#endif
   access_linear(BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.base + temp_RSP + 0,
     8, 0, BX_READ, &new_rip);
 
@@ -1680,9 +1696,13 @@ BX_CPU_C::iret_protected(bxInstruction_c *i)
   else
     write_flags((Bit16u) new_eflags, prev_cpl==0, prev_cpl<=BX_CPU_THIS_PTR get_IOPL());
 
-  // load SS:RSP from stack
-  // load the SS-cache with SS descriptor
-  load_ss(&ss_selector, &ss_descriptor, cs_selector.rpl);
+#if KPL64Hacks
+  if (!ssIsNull) {
+    // load SS:RSP from stack
+    // load the SS-cache with SS descriptor
+    load_ss(&ss_selector, &ss_descriptor, cs_selector.rpl);
+  }
+#endif
 
   RSP = new_rsp;
 
   validate_seg_regs();
diff --git a/bochs/cpu/proc_ctrl.cc b/bochs/cpu/proc_ctrl.cc
index 2ef431deb..bf4301d88 100644
--- a/bochs/cpu/proc_ctrl.cc
+++ b/bochs/cpu/proc_ctrl.cc
@@ -1,5 +1,5 @@
 /////////////////////////////////////////////////////////////////////////
-// $Id: proc_ctrl.cc,v 1.47 2002-09-23 14:45:44 bdenney Exp $
+// $Id: proc_ctrl.cc,v 1.48 2002-09-24 00:44:56 kevinlawton Exp $
 /////////////////////////////////////////////////////////////////////////
 //
 // Copyright (C) 2001 MandrakeSoft S.A.
@@ -1775,7 +1775,12 @@ BX_CPU_C::RDMSR(bxInstruction_c *i)
 #endif  // #if BX_SUPPORT_X86_64
 
     default:
+#if KPL64Hacks
+      BX_INFO(("RDMSR: Unknown register %#x", ECX));
+      return;
+#else
       BX_PANIC(("RDMSR: Unknown register %#x", ECX));
+#endif
       goto do_exception;
     }
 
@@ -1875,7 +1880,12 @@ BX_CPU_C::WRMSR(bxInstruction_c *i)
 #endif  // #if BX_SUPPORT_X86_64
 
     default:
+#if KPL64Hacks
+      BX_INFO(("WRMSR: Unknown register %#x", ECX));
+      return;
+#else
       BX_PANIC(("WRMSR: Unknown register %#x", ECX));
+#endif
       goto do_exception;
     }
 
diff --git a/bochs/cpu/segment_ctrl_pro.cc b/bochs/cpu/segment_ctrl_pro.cc
index 3a3dc9c08..ca1f5c034 100644
--- a/bochs/cpu/segment_ctrl_pro.cc
+++ b/bochs/cpu/segment_ctrl_pro.cc
@@ -1,5 +1,5 @@
 /////////////////////////////////////////////////////////////////////////
-// $Id: segment_ctrl_pro.cc,v 1.14 2002-09-19 19:17:20 kevinlawton Exp $
+// $Id: segment_ctrl_pro.cc,v 1.15 2002-09-24 00:44:56 kevinlawton Exp $
 /////////////////////////////////////////////////////////////////////////
 //
 // Copyright (C) 2001 MandrakeSoft S.A.
@@ -561,6 +561,18 @@ BX_CPU_C::load_ss_null(bx_selector_t *selector, bx_descriptor_t *descriptor, Bit
   BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].selector = *selector;
   BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache = *descriptor;
   BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].selector.rpl = cpl;
+
+#if KPL64Hacks
+  BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.valid = 1;
+  BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.p = 1;
+  BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.segment = 1;
+  BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.executable = 0;
+  BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.c_ed = 0;
+  BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.r_w = 1;
+  BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.a = 1;
+  BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.base = 0;
+  BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.l = 1;
+#endif
 }
 #endif
 
diff --git a/bochs/cpu/stack32.cc b/bochs/cpu/stack32.cc
index 9c2a20328..cd16849f4 100644
--- a/bochs/cpu/stack32.cc
+++ b/bochs/cpu/stack32.cc
@@ -1,5 +1,5 @@
 /////////////////////////////////////////////////////////////////////////
-// $Id: stack32.cc,v 1.15 2002-09-20 23:17:51 kevinlawton Exp $
+// $Id: stack32.cc,v 1.16 2002-09-24 00:44:56 kevinlawton Exp $
 /////////////////////////////////////////////////////////////////////////
 //
 // Copyright (C) 2001 MandrakeSoft S.A.
@@ -110,6 +110,7 @@ BX_CPU_C::PUSH_ES(bxInstruction_c *i)
 void
 BX_CPU_C::PUSH_FS(bxInstruction_c *i)
 {
+BailBigRSP("push_fs");
   if (i->os32L())
     push_32(BX_CPU_THIS_PTR sregs[BX_SEG_REG_FS].selector.value);
   else
@@ -118,6 +119,7 @@ BX_CPU_C::PUSH_FS(bxInstruction_c *i)
 void
 BX_CPU_C::PUSH_GS(bxInstruction_c *i)
 {
+BailBigRSP("push_gs");
   if (i->os32L())
     push_32(BX_CPU_THIS_PTR sregs[BX_SEG_REG_GS].selector.value);
   else
@@ -164,6 +166,7 @@ BX_CPU_C::POP_ES(bxInstruction_c *i)
 void
 BX_CPU_C::POP_FS(bxInstruction_c *i)
 {
+BailBigRSP("pop_fs");
   if (i->os32L()) {
     Bit32u fs;
     pop_32(&fs);
@@ -178,6 +181,7 @@ BX_CPU_C::POP_FS(bxInstruction_c *i)
 void
 BX_CPU_C::POP_GS(bxInstruction_c *i)
 {
+BailBigRSP("pop_gs");
   if (i->os32L()) {
     Bit32u gs;
     pop_32(&gs);
diff --git a/bochs/cpu/stack_pro.cc b/bochs/cpu/stack_pro.cc
index 8497a003f..05fd68d21 100644
--- a/bochs/cpu/stack_pro.cc
+++ b/bochs/cpu/stack_pro.cc
@@ -1,5 +1,5 @@
 /////////////////////////////////////////////////////////////////////////
-// $Id: stack_pro.cc,v 1.7 2002-09-15 01:00:20 kevinlawton Exp $
+// $Id: stack_pro.cc,v 1.8 2002-09-24 00:44:56 kevinlawton Exp $
 /////////////////////////////////////////////////////////////////////////
 //
 // Copyright (C) 2001 MandrakeSoft S.A.
@@ -39,6 +39,7 @@
 void
 BX_CPU_C::push_16(Bit16u value16)
 {
+BailBigRSP("push_16");
   Bit32u temp_ESP;
 
 
@@ -90,6 +91,7 @@ BX_CPU_C::push_16(Bit16u value16)
 void
 BX_CPU_C::push_32(Bit32u value32)
 {
+BailBigRSP("push_32");
   /* must use StackAddrSize, and either ESP or SP accordingly */
   if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.d_b) { /* StackAddrSize = 32 */
     /* 32bit stack size: pushes use SS:ESP */
@@ -152,6 +154,7 @@ BX_CPU_C::push_64(Bit64u value64)
 void
 BX_CPU_C::pop_16(Bit16u *value16_ptr)
 {
+BailBigRSP("pop_16");
   Bit32u temp_ESP;
 
 #if BX_CPU_LEVEL >= 3
@@ -185,6 +188,7 @@ BX_CPU_C::pop_16(Bit16u *value16_ptr)
 void
 BX_CPU_C::pop_32(Bit32u *value32_ptr)
 {
+BailBigRSP("pop_32");
   Bit32u temp_ESP;
 
   /* 32 bit stack mode: use SS:ESP */
@@ -237,6 +241,13 @@ BX_CPU_C::pop_64(Bit64u *value64_ptr)
   Boolean
 BX_CPU_C::can_push(bx_descriptor_t *descriptor, Bit32u esp, Bit32u bytes)
 {
+#if BX_SUPPORT_X86_64
+#if KPL64Hacks
+  if ( BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64 )
+    return(1); // SS segment is ignored in long mode, to my knowledge. (KPL)
+#endif
+#endif
+
   if ( real_mode() ) { /* code not needed ??? */
     BX_PANIC(("can_push(): called in real mode"));
     return(0); /* never gets here */
@@ -324,6 +335,13 @@ BX_CPU_C::can_pop(Bit32u bytes)
 {
   Bit32u temp_ESP, expand_down_limit;
 
+#if BX_SUPPORT_X86_64
+#if KPL64Hacks
+  if ( BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64 )
+    return(1); // SS segment is ignored in long mode, to my knowledge. (KPL)
+#endif
+#endif
+
   /* ??? */
   if (real_mode()) BX_PANIC(("can_pop(): called in real mode?"));
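
For reference, here is a minimal, standalone sketch of how the BailBigRSP guard introduced in config.h.in behaves once the "#if 0" around it is flipped to 1. It is only an illustration of the macro pattern, not part of the patch: RIP, RSP, cpu_mode and BX_PANIC are stand-ins here, since the real definitions come from cpu.h and bochs.h.

#include <stdio.h>
#include <stdint.h>

/* Stand-ins for the Bochs definitions; the real RIP/RSP/BX_PANIC live in cpu.h/bochs.h. */
static uint64_t RIP = 0x0000000100000000ULL;  /* value above 4GB, so the guard trips */
static uint64_t RSP = 0x0000000000007c00ULL;
static unsigned cpu_mode = 0;                 /* placeholder for BX_CPU_THIS_PTR cpu_mode */
#define BX_PANIC(args) (printf args, printf("\n"))

/* Same shape as the hack in config.h.in, with the "#if 0" flipped to enable it. */
#if 1
#define BailBigRSP(s) \
  if ( (RIP > 0xffffffff) || \
       (RSP > 0xffffffff) ) \
    BX_PANIC((s ": bailing due to big RSP value, mode==%u", cpu_mode))
#else
#define BailBigRSP(s)
#endif

int main(void)
{
  /* An instruction handler starts with this guard, e.g. RETnear16_Iw in ctrl_xfer16.cc. */
  BailBigRSP("RETnear16_Iw");
  return 0;
}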