From 5da36b7d3da8393f67e0265bb2850727930b8e79 Mon Sep 17 00:00:00 2001 From: Stanislav Shwartsman Date: Fri, 29 Jul 2005 06:29:57 +0000 Subject: [PATCH] Fixed code duplication, added canonical address checking for RETF in long mode --- bochs/cpu/cpu.h | 18 +++--- bochs/cpu/ctrl_xfer_pro.cc | 111 ++++++++++++---------------------- bochs/cpu/proc_ctrl.cc | 4 +- bochs/cpu/segment_ctrl_pro.cc | 12 ++-- 4 files changed, 57 insertions(+), 88 deletions(-) diff --git a/bochs/cpu/cpu.h b/bochs/cpu/cpu.h index 3167b384e..63c77f32a 100644 --- a/bochs/cpu/cpu.h +++ b/bochs/cpu/cpu.h @@ -1,5 +1,5 @@ ///////////////////////////////////////////////////////////////////////// -// $Id: cpu.h,v 1.226 2005-07-25 04:18:10 sshwarts Exp $ +// $Id: cpu.h,v 1.227 2005-07-29 06:29:57 sshwarts Exp $ ///////////////////////////////////////////////////////////////////////// // // Copyright (C) 2001 MandrakeSoft S.A. @@ -328,11 +328,11 @@ #define BX_MSR_KERNELGSBASE 0xc0000102 #endif -#define BX_MODE_IA32_REAL 0x0 // CR0.PE=0 -#define BX_MODE_IA32_V8086 0x1 // CR0.PE=1, EFLAGS.VM=1 -#define BX_MODE_IA32_PROTECTED 0x2 // CR0.PE=1, EFLAGS.VM=0 -#define BX_MODE_LONG_COMPAT 0x3 // EFER.LMA = 0, EFER.LME = 1, CR0.PE=1 -#define BX_MODE_LONG_64 0x4 // EFER.LMA = 1, EFER.LME = 1, CR0.PE=1 +#define BX_MODE_IA32_REAL 0x0 // CR0.PE=0 | +#define BX_MODE_IA32_V8086 0x1 // CR0.PE=1, EFLAGS.VM=1 | EFER.LMA=0 +#define BX_MODE_IA32_PROTECTED 0x2 // CR0.PE=1, EFLAGS.VM=0 | +#define BX_MODE_LONG_COMPAT 0x3 // EFER.LMA = EFER.LME = 1, CR0.PE=1, CS.L=0 +#define BX_MODE_LONG_64 0x4 // EFER.LMA = EFER.LME = 1, CR0.PE=1, CS.L=1 #define BX_CANONICAL_BITS (48) @@ -2665,6 +2665,8 @@ public: // for now... #define Write_RMW_virtual_qword(val64) write_RMW_virtual_qword(val64) BX_SMF void branch_near32(Bit32u new_eip) BX_CPP_AttrRegparmN(1); + BX_SMF void branch_far(bx_selector_t *selector, + bx_descriptor_t *descriptor, bx_address rip, Bit8u cpl); #if BX_SUPPORT_X86_64 BX_SMF void branch_near64(bxInstruction_c *i) BX_CPP_AttrRegparmN(1); #endif @@ -2686,9 +2688,9 @@ public: // for now... #endif BX_SMF void access_linear(bx_address address, unsigned length, unsigned pl, - unsigned rw, void *data) BX_CPP_AttrRegparmN(3); + unsigned rw, void *data) BX_CPP_AttrRegparmN(3); BX_SMF Bit32u translate_linear(bx_address laddr, - unsigned pl, unsigned rw, unsigned access_type) BX_CPP_AttrRegparmN(3); + unsigned pl, unsigned rw, unsigned access_type) BX_CPP_AttrRegparmN(3); BX_SMF Bit32u itranslate_linear(bx_address laddr, unsigned pl) BX_CPP_AttrRegparmN(2); BX_SMF Bit32u dtranslate_linear(bx_address laddr, unsigned pl, unsigned rw) BX_CPP_AttrRegparmN(3); BX_SMF void TLB_flush(bx_bool invalidateGlobal); diff --git a/bochs/cpu/ctrl_xfer_pro.cc b/bochs/cpu/ctrl_xfer_pro.cc index 8822f5135..a3c8fc97a 100644 --- a/bochs/cpu/ctrl_xfer_pro.cc +++ b/bochs/cpu/ctrl_xfer_pro.cc @@ -1,5 +1,5 @@ //////////////////////////////////////////////////////////////////////// -// $Id: ctrl_xfer_pro.cc,v 1.45 2005-07-22 05:00:40 sshwarts Exp $ +// $Id: ctrl_xfer_pro.cc,v 1.46 2005-07-29 06:29:57 sshwarts Exp $ ///////////////////////////////////////////////////////////////////////// // // Copyright (C) 2001 MandrakeSoft S.A. @@ -99,21 +99,8 @@ BX_CPU_C::jump_protected(bxInstruction_c *i, Bit16u cs_raw, bx_address dispBig) return; } - /* instruction pointer must be in code segment limit else #GP(0) */ - if (! 
IS_LONG64_SEGMENT(descriptor)) - { - if (dispBig > descriptor.u.segment.limit_scaled) { - BX_ERROR(("jump_protected: EIP > limit")); - exception(BX_GP_EXCEPTION, 0, 0); - return; - } - } + branch_far(&selector, &descriptor, dispBig, CPL); - /* Load CS:IP from destination pointer */ - /* Load CS-cache with new segment descriptor */ - /* CPL does not change for conforming code segment */ - load_cs(&selector, &descriptor, CPL); - RIP = dispBig; return; } else { @@ -170,7 +157,6 @@ BX_CPU_C::jump_protected(bxInstruction_c *i, Bit16u cs_raw, bx_address dispBig) if (EIP > BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled) { BX_ERROR(("jump_protected: EIP not within CS limits")); exception(BX_GP_EXCEPTION, 0, 0); - return; } return; @@ -1087,27 +1073,7 @@ BX_CPU_C::return_protected(bxInstruction_c *i, Bit16u pop_bytes) return_RIP = return_IP; } - // EIP must be in code segment limit, else #GP(0) -#if BX_SUPPORT_X86_64 - if (IsLongMode()) { - if (! IsCanonical(return_RIP)) { - BX_ERROR(("branch_near64: canonical RIP violation")); - exception(BX_GP_EXCEPTION, 0, 0); - } - } - else -#endif - { - if (return_RIP > cs_descriptor.u.segment.limit_scaled) { - BX_ERROR(("return_protected: return RIP > CS.limit")); - exception(BX_GP_EXCEPTION, 0, 0); - } - } - - // load CS:EIP from stack - // load CS register with descriptor - load_cs(&cs_selector, &cs_descriptor, CPL); - RIP = return_RIP; + branch_far(&cs_selector, &cs_descriptor, return_RIP, CPL); // increment eSP #if BX_SUPPORT_X86_64 @@ -1293,29 +1259,7 @@ BX_CPU_C::return_protected(bxInstruction_c *i, Bit16u pop_bytes) return; } - /* EIP must be in code segment limit, else #GP(0) */ -#if BX_SUPPORT_X86_64 - if (IsLongMode()) { - if (! IsCanonical(return_RIP)) { - BX_ERROR(("branch_near64: canonical RIP violation")); - exception(BX_GP_EXCEPTION, 0, 0); - } - } - else -#endif - { - if (return_RIP > cs_descriptor.u.segment.limit_scaled) { - BX_ERROR(("return_protected: EIP > CS.limit")); - exception(BX_GP_EXCEPTION, 0, 0); - } - } - - /* set CPL to RPL of return CS selector */ - /* load CS:IP from stack */ - /* set CS RPL to CPL */ - /* load the CS-cache with return CS descriptor */ - load_cs(&cs_selector, &cs_descriptor, cs_selector.rpl); - RIP = return_RIP; + branch_far(&cs_selector, &cs_descriptor, return_RIP, cs_selector.rpl); /* load SS:SP from stack */ /* load SS-cache with return SS descriptor */ @@ -1607,19 +1551,10 @@ BX_CPU_C::iret_protected(bxInstruction_c *i) access_linear(BX_CPU_THIS_PTR get_segment_base(BX_SEG_REG_SS) + temp_RSP + 24, 8, 0, BX_READ, &new_rsp); - /* RIP must be in code segment limit, else #GP(0) */ - if (cs_descriptor.u.segment.l == 0 && new_rip > cs_descriptor.u.segment.limit_scaled ) { - BX_ERROR(("iret: IP > descriptor limit")); - exception(BX_GP_EXCEPTION, 0, 0); - return; - } - - /* load CS:RIP from stack */ - /* load the CS-cache with CS descriptor */ - /* set CPL to the RPL of the return CS selector */ prev_cpl = CPL; /* previous CPL */ - load_cs(&cs_selector, &cs_descriptor, cs_selector.rpl); - BX_CPU_THIS_PTR rip = new_rip; + + /* set CPL to the RPL of the return CS selector */ + branch_far(&cs_selector, &cs_descriptor, new_rip, cs_selector.rpl); /* load flags from stack */ // perhaps I should always write_eflags(), thus zeroing @@ -1958,6 +1893,38 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::branch_near32(Bit32u new_EIP) revalidate_prefetch_q(); } +void BX_CPU_C::branch_far(bx_selector_t *selector, + bx_descriptor_t *descriptor, bx_address rip, Bit8u cpl) +{ +#if BX_SUPPORT_X86_64 + if 
(descriptor->u.segment.l) + { + if (! BX_CPU_THIS_PTR msr.lma) + BX_PANIC(("branch_far: attempt to enter x86-64 LONG mode without enabling EFER.LMA !")); + + if (! IsCanonical(rip)) { + BX_ERROR(("branch_far: canonical RIP violation")); + exception(BX_GP_EXCEPTION, 0, 0); + } + } + else +#endif + { + /* instruction pointer must be in code segment limit else #GP(0) */ + if (rip > descriptor->u.segment.limit_scaled) { + BX_ERROR(("branch_far: EIP > limit")); + exception(BX_GP_EXCEPTION, 0, 0); + } + } + + /* Load CS:IP from destination pointer */ + /* Load CS-cache with new segment descriptor */ + load_cs(selector, descriptor, cpl); + + /* Change the RIP value */ + RIP = rip; +} + #if BX_SUPPORT_X86_64 void BX_CPP_AttrRegparmN(1) BX_CPU_C::branch_near64(bxInstruction_c *i) { diff --git a/bochs/cpu/proc_ctrl.cc b/bochs/cpu/proc_ctrl.cc index 94052bec7..d240d9b7a 100644 --- a/bochs/cpu/proc_ctrl.cc +++ b/bochs/cpu/proc_ctrl.cc @@ -1,5 +1,5 @@ ///////////////////////////////////////////////////////////////////////// -// $Id: proc_ctrl.cc,v 1.109 2005-07-07 18:40:33 sshwarts Exp $ +// $Id: proc_ctrl.cc,v 1.110 2005-07-29 06:29:57 sshwarts Exp $ ///////////////////////////////////////////////////////////////////////// // // Copyright (C) 2001 MandrakeSoft S.A. @@ -1339,7 +1339,7 @@ void BX_CPU_C::SetCR0(Bit32u val_32) if (prev_pg==0 && BX_CPU_THIS_PTR cr0.pg) { if (BX_CPU_THIS_PTR msr.lme) { if (!BX_CPU_THIS_PTR cr4.get_PAE()) { - BX_PANIC(("SetCR0: attempt to enter x86-64 LONG mode without enabling CR4.PAE !!!")); + BX_ERROR(("SetCR0: attempt to enter x86-64 LONG mode without enabling CR4.PAE !")); exception(BX_GP_EXCEPTION, 0, 0); } BX_CPU_THIS_PTR msr.lma = 1; diff --git a/bochs/cpu/segment_ctrl_pro.cc b/bochs/cpu/segment_ctrl_pro.cc index e01c0ba6a..a898e4e1f 100644 --- a/bochs/cpu/segment_ctrl_pro.cc +++ b/bochs/cpu/segment_ctrl_pro.cc @@ -1,5 +1,5 @@ ///////////////////////////////////////////////////////////////////////// -// $Id: segment_ctrl_pro.cc,v 1.40 2005-07-20 01:26:46 sshwarts Exp $ +// $Id: segment_ctrl_pro.cc,v 1.41 2005-07-29 06:29:57 sshwarts Exp $ ///////////////////////////////////////////////////////////////////////// // // Copyright (C) 2001 MandrakeSoft S.A. @@ -381,7 +381,7 @@ BX_CPU_C::loadSRegLMNominal(unsigned segI, unsigned selector, bx_address base, // Load a segment register in long-mode with nominal values, // so descriptor cache values are compatible with existing checks. seg->cache.u.segment.base = base; - // (KPL) I doubt we need limit_scaled. If we do, it should be + // I doubt we need limit_scaled. If we do, it should be // of type bx_addr and be maxed to 64bits, not 32. seg->cache.u.segment.limit_scaled = 0xffffffff; seg->cache.valid = 1; @@ -544,13 +544,13 @@ BX_CPU_C::load_ldtr(bx_selector_t *selector, bx_descriptor_t *descriptor) BX_CPU_C::load_cs(bx_selector_t *selector, bx_descriptor_t *descriptor, Bit8u cpl) { - BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector = *selector; - BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache = *descriptor; + BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector = *selector; + BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache = *descriptor; /* caller may request different CPL then in selector */ BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.rpl = cpl; - BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.valid = 1; /* ??? */ - // (BW) Added cpl to the selector value. + BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.valid = 1; + // Added cpl to the selector value. 
BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value = (0xfffc & BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value) | cpl;
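
[Editor's note, appended after the patch: for readers unfamiliar with the canonical-address rule that the new branch_far() enforces on far returns in long mode, here is a minimal standalone sketch of the check. It assumes the 48-bit canonical form implied by BX_CANONICAL_BITS in cpu.h above; is_canonical() below is a hypothetical illustration written for this note, not Bochs' actual IsCanonical() implementation.]

    #include <cstdint>

    static const int CANONICAL_BITS = 48;  // mirrors BX_CANONICAL_BITS in cpu.h

    // A 64-bit address is canonical when bits 63..48 are all copies of
    // bit 47, i.e. the value is the sign-extension of its low 48 bits.
    // A far transfer to a non-canonical RIP in 64-bit mode must raise
    // #GP(0), which is the case branch_far() handles with
    // exception(BX_GP_EXCEPTION, 0, 0).
    static bool is_canonical(uint64_t addr)
    {
      // Shift the low 48 bits to the top, arithmetic-shift them back to
      // sign-extend, and compare with the original value. (The signed
      // right shift relies on two's-complement sign extension, which all
      // platforms Bochs targets provide.)
      int64_t sext = (int64_t)(addr << (64 - CANONICAL_BITS)) >> (64 - CANONICAL_BITS);
      return (uint64_t)sext == addr;
    }

For example, 0x00007FFFFFFFFFFF and 0xFFFF800000000000 are canonical, while 0x0000800000000000 is not (bit 47 is set but bits 63..48 are clear). Note that the patch applies this test only when the target code segment is a 64-bit segment (descriptor->u.segment.l set); compatibility-mode and legacy targets instead take the else branch and are checked against limit_scaled, matching the mode table added to cpu.h.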