From 70f513b07b3c30f1092e1e0f59c56f0b48953c53 Mon Sep 17 00:00:00 2001 From: Stanislav Shwartsman Date: Mon, 10 Sep 2007 20:47:08 +0000 Subject: [PATCH] Make efer control MSR separate register --- bochs/cpu/cpu.h | 36 ++++++++++++++++++++------------- bochs/cpu/ctrl_xfer_pro.cc | 6 +++--- bochs/cpu/init.cc | 21 ++++++++++--------- bochs/cpu/iret.cc | 5 ++--- bochs/cpu/paging.cc | 16 +++++++-------- bochs/cpu/proc_ctrl.cc | 38 +++++++++++++++++++---------------- bochs/cpu/segment_ctrl_pro.cc | 4 ++-- bochs/cpu/smm.cc | 26 ++++++++++++------------ 8 files changed, 82 insertions(+), 70 deletions(-) diff --git a/bochs/cpu/cpu.h b/bochs/cpu/cpu.h index 5d7dbc259..84a302789 100644 --- a/bochs/cpu/cpu.h +++ b/bochs/cpu/cpu.h @@ -1,5 +1,5 @@ ///////////////////////////////////////////////////////////////////////// -// $Id: cpu.h,v 1.324 2007-09-10 16:00:14 sshwarts Exp $ +// $Id: cpu.h,v 1.325 2007-09-10 20:47:08 sshwarts Exp $ ///////////////////////////////////////////////////////////////////////// // // Copyright (C) 2001 MandrakeSoft S.A. @@ -537,13 +537,6 @@ typedef struct #endif #if BX_SUPPORT_X86_64 - // x86-64 EFER bits - bx_bool sce; // system call extensions - bx_bool lme; // long mode enable - bx_bool lma; // long mode active - bx_bool nxe; // no-execute enable - bx_bool ffxsr; // fast FXSAVE/FXRSTOR - Bit64u star; Bit64u lstar; Bit64u cstar; @@ -571,6 +564,17 @@ typedef struct } bx_regs_msr_t; #endif +#if BX_SUPPORT_X86_64 +typedef struct bx_efer_t { + // x86-64 EFER bits + bx_bool sce; // system call extensions + bx_bool lme; // long mode enable + bx_bool lma; // long mode active + bx_bool nxe; // no-execute enable + bx_bool ffxsr; // fast FXSAVE/FXRSTOR +} bx_efer_t; +#endif + #include "crregs.h" #include "descriptor.h" @@ -1037,6 +1041,10 @@ public: // for now... 
#endif #endif +#if BX_SUPPORT_X86_64 + bx_efer_t efer; +#endif + /* SMM base register */ Bit32u smbase; @@ -3145,11 +3153,11 @@ BX_CPP_INLINE void BX_CPU_C::set_reg64(unsigned reg, Bit64u val) #if BX_SUPPORT_X86_64 BX_CPP_INLINE Bit32u BX_CPU_C::get_EFER(void) { - return (BX_CPU_THIS_PTR msr.sce << 0) | - (BX_CPU_THIS_PTR msr.lme << 8) | - (BX_CPU_THIS_PTR msr.lma << 10) | - (BX_CPU_THIS_PTR msr.nxe << 11) | - (BX_CPU_THIS_PTR msr.ffxsr << 14); + return (BX_CPU_THIS_PTR efer.sce << 0) | + (BX_CPU_THIS_PTR efer.lme << 8) | + (BX_CPU_THIS_PTR efer.lma << 10) | + (BX_CPU_THIS_PTR efer.nxe << 11) | + (BX_CPU_THIS_PTR efer.ffxsr << 14); } #endif @@ -3176,7 +3184,7 @@ BX_CPP_INLINE bx_bool BX_CPU_C::protected_mode(void) BX_CPP_INLINE unsigned BX_CPU_C::long_mode(void) { #if BX_SUPPORT_X86_64 - return BX_CPU_THIS_PTR msr.lma; + return BX_CPU_THIS_PTR efer.lma; #else return 0; #endif diff --git a/bochs/cpu/ctrl_xfer_pro.cc b/bochs/cpu/ctrl_xfer_pro.cc index d7960a06f..4a838a755 100644 --- a/bochs/cpu/ctrl_xfer_pro.cc +++ b/bochs/cpu/ctrl_xfer_pro.cc @@ -1,5 +1,5 @@ //////////////////////////////////////////////////////////////////////// -// $Id: ctrl_xfer_pro.cc,v 1.56 2007-03-14 21:15:15 sshwarts Exp $ +// $Id: ctrl_xfer_pro.cc,v 1.57 2007-09-10 20:47:08 sshwarts Exp $ ///////////////////////////////////////////////////////////////////////// // // Copyright (C) 2001 MandrakeSoft S.A. @@ -51,7 +51,7 @@ void BX_CPU_C::check_cs(bx_descriptor_t *descriptor, Bit16u cs_raw, Bit8u check_ #if BX_SUPPORT_X86_64 if (descriptor->u.segment.l) { - if (! BX_CPU_THIS_PTR msr.lma) { + if (! 
BX_CPU_THIS_PTR efer.lma) { BX_PANIC(("check_cs: attempt to jump to long mode without enabling EFER.LMA !")); } @@ -104,7 +104,7 @@ BX_CPU_C::load_cs(bx_selector_t *selector, bx_descriptor_t *descriptor, Bit8u cp (0xfffc & BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector.value) | cpl; #if BX_SUPPORT_X86_64 - if (BX_CPU_THIS_PTR msr.lma) { + if (BX_CPU_THIS_PTR efer.lma) { if (descriptor->u.segment.l) { BX_CPU_THIS_PTR cpu_mode = BX_MODE_LONG_64; BX_DEBUG(("Long Mode Activated")); diff --git a/bochs/cpu/init.cc b/bochs/cpu/init.cc index a0421a8f1..98fd721ad 100644 --- a/bochs/cpu/init.cc +++ b/bochs/cpu/init.cc @@ -1,5 +1,5 @@ ///////////////////////////////////////////////////////////////////////// -// $Id: init.cc,v 1.130 2007-07-31 20:25:52 sshwarts Exp $ +// $Id: init.cc,v 1.131 2007-09-10 20:47:08 sshwarts Exp $ ///////////////////////////////////////////////////////////////////////// // // Copyright (C) 2001 MandrakeSoft S.A. @@ -647,11 +647,11 @@ Bit64s BX_CPU_C::param_restore(bx_param_c *param, Bit64s val) BX_CPU_THIS_PTR setEFlags((Bit32u)val); #if BX_SUPPORT_X86_64 } else if (!strcmp(pname, "EFER")) { - BX_CPU_THIS_PTR msr.sce = (val >> 0) & 1; - BX_CPU_THIS_PTR msr.lme = (val >> 8) & 1; - BX_CPU_THIS_PTR msr.lma = (val >> 10) & 1; - BX_CPU_THIS_PTR msr.nxe = (val >> 11) & 1; - BX_CPU_THIS_PTR msr.ffxsr = (val >> 14) & 1; + BX_CPU_THIS_PTR efer.sce = (val >> 0) & 1; + BX_CPU_THIS_PTR efer.lme = (val >> 8) & 1; + BX_CPU_THIS_PTR efer.lma = (val >> 10) & 1; + BX_CPU_THIS_PTR efer.nxe = (val >> 11) & 1; + BX_CPU_THIS_PTR efer.ffxsr = (val >> 14) & 1; #endif } else if (!strcmp(pname, "ar_byte") || !strcmp(pname, "selector")) { segname = param->get_parent()->get_name(); @@ -946,9 +946,10 @@ void BX_CPU_C::reset(unsigned source) BX_CPU_THIS_PTR msr.apicbase |= 0x900; #endif #if BX_SUPPORT_X86_64 - BX_CPU_THIS_PTR msr.lme = BX_CPU_THIS_PTR msr.lma = 0; - BX_CPU_THIS_PTR msr.sce = BX_CPU_THIS_PTR msr.nxe = 0; - BX_CPU_THIS_PTR msr.ffxsr = 0; + BX_CPU_THIS_PTR 
efer.lme = BX_CPU_THIS_PTR efer.lma = 0; + BX_CPU_THIS_PTR efer.sce = BX_CPU_THIS_PTR efer.nxe = 0; + BX_CPU_THIS_PTR efer.ffxsr = 0; + BX_CPU_THIS_PTR msr.star = 0; BX_CPU_THIS_PTR msr.lstar = 0; BX_CPU_THIS_PTR msr.cstar = 0; @@ -1105,7 +1106,7 @@ void BX_CPU_C::assert_checks(void) { // check CPU mode consistency #if BX_SUPPORT_X86_64 - if (BX_CPU_THIS_PTR msr.lma) { + if (BX_CPU_THIS_PTR efer.lma) { if (! BX_CPU_THIS_PTR cr0.get_PE()) { BX_PANIC(("assert_checks: EFER.LMA is set when CR0.PE=0 !")); } diff --git a/bochs/cpu/iret.cc b/bochs/cpu/iret.cc index 24e3a23cb..175076c1c 100755 --- a/bochs/cpu/iret.cc +++ b/bochs/cpu/iret.cc @@ -1,5 +1,5 @@ //////////////////////////////////////////////////////////////////////// -// $Id: iret.cc,v 1.18 2007-03-14 21:15:15 sshwarts Exp $ +// $Id: iret.cc,v 1.19 2007-09-10 20:47:08 sshwarts Exp $ ///////////////////////////////////////////////////////////////////////// // // Copyright (C) 2001 MandrakeSoft S.A. @@ -46,8 +46,7 @@ BX_CPU_C::iret_protected(bxInstruction_c *i) bx_descriptor_t cs_descriptor, ss_descriptor; #if BX_SUPPORT_X86_64 - if (BX_CPU_THIS_PTR msr.lma) - { + if (long_mode()) { long_iret(i); return; } diff --git a/bochs/cpu/paging.cc b/bochs/cpu/paging.cc index 48c7b890d..a4d8bd86b 100644 --- a/bochs/cpu/paging.cc +++ b/bochs/cpu/paging.cc @@ -1,5 +1,5 @@ ///////////////////////////////////////////////////////////////////////// -// $Id: paging.cc,v 1.84 2007-08-30 16:48:10 sshwarts Exp $ +// $Id: paging.cc,v 1.85 2007-09-10 20:47:08 sshwarts Exp $ ///////////////////////////////////////////////////////////////////////// // // Copyright (C) 2001 MandrakeSoft S.A. 
@@ -584,7 +584,7 @@ void BX_CPU_C::page_fault(unsigned fault, bx_address laddr, unsigned pl, unsigne error_code |= (pl << 2) | (rw << 1); #if BX_SUPPORT_X86_64 - if (BX_CPU_THIS_PTR msr.nxe && (access_type == CODE_ACCESS)) + if (BX_CPU_THIS_PTR efer.nxe && (access_type == CODE_ACCESS)) error_code |= ERROR_CODE_ACCESS; // I/D = 1 #endif BX_CPU_THIS_PTR cr2 = laddr; @@ -669,7 +669,7 @@ bx_phy_address BX_CPU_C::translate_linear(bx_address laddr, unsigned pl, unsigne page_fault(ERROR_NOT_PRESENT, laddr, pl, isWrite, access_type); // PML4 Entry NOT present } if (pml4 & PAGE_DIRECTORY_NX_BIT) { - if (! BX_CPU_THIS_PTR msr.nxe) { + if (! BX_CPU_THIS_PTR efer.nxe) { BX_DEBUG(("PML4: NX bit set when EFER.NXE is disabled")); page_fault(ERROR_RESERVED | ERROR_PROTECTION, laddr, pl, isWrite, access_type); } @@ -704,10 +704,10 @@ bx_phy_address BX_CPU_C::translate_linear(bx_address laddr, unsigned pl, unsigne page_fault(ERROR_NOT_PRESENT, laddr, pl, isWrite, access_type); // PDP Entry NOT present } #if BX_SUPPORT_X86_64 - if (BX_CPU_THIS_PTR msr.lma) + if (BX_CPU_THIS_PTR efer.lma) { if (pdp & PAGE_DIRECTORY_NX_BIT) { - if (! BX_CPU_THIS_PTR msr.nxe) { + if (! BX_CPU_THIS_PTR efer.nxe) { BX_DEBUG(("PAE PDP: NX bit set when EFER.NXE is disabled")); page_fault(ERROR_RESERVED | ERROR_PROTECTION, laddr, pl, isWrite, access_type); } @@ -738,7 +738,7 @@ bx_phy_address BX_CPU_C::translate_linear(bx_address laddr, unsigned pl, unsigne #if BX_SUPPORT_X86_64 if (pde & PAGE_DIRECTORY_NX_BIT) { - if (! BX_CPU_THIS_PTR msr.nxe) { + if (! BX_CPU_THIS_PTR efer.nxe) { BX_DEBUG(("PAE PDE: NX bit set when EFER.NXE is disabled")); page_fault(ERROR_RESERVED | ERROR_PROTECTION, laddr, pl, isWrite, access_type); } @@ -800,7 +800,7 @@ bx_phy_address BX_CPU_C::translate_linear(bx_address laddr, unsigned pl, unsigne #if BX_SUPPORT_X86_64 if (pte & PAGE_DIRECTORY_NX_BIT) { - if (! BX_CPU_THIS_PTR msr.nxe) { + if (! 
BX_CPU_THIS_PTR efer.nxe) { BX_DEBUG(("PAE PTE: NX bit set when EFER.NXE is disabled")); page_fault(ERROR_RESERVED | ERROR_PROTECTION, laddr, pl, isWrite, access_type); } @@ -854,7 +854,7 @@ bx_phy_address BX_CPU_C::translate_linear(bx_address laddr, unsigned pl, unsigne else #endif // #if BX_SUPPORT_PAE { - // CR4.PAE==0 (and MSR.LMA==0) + // CR4.PAE==0 (and EFER.LMA==0) Bit32u pde, pte; bx_phy_address pde_addr; diff --git a/bochs/cpu/proc_ctrl.cc b/bochs/cpu/proc_ctrl.cc index e8c16731a..6db8c5f50 100644 --- a/bochs/cpu/proc_ctrl.cc +++ b/bochs/cpu/proc_ctrl.cc @@ -1,5 +1,5 @@ ///////////////////////////////////////////////////////////////////////// -// $Id: proc_ctrl.cc,v 1.167 2007-07-31 20:25:52 sshwarts Exp $ +// $Id: proc_ctrl.cc,v 1.168 2007-09-10 20:47:08 sshwarts Exp $ ///////////////////////////////////////////////////////////////////////// // // Copyright (C) 2001 MandrakeSoft S.A. @@ -1210,7 +1210,7 @@ void BX_CPU_C::LOADALL(bxInstruction_c *i) void BX_CPU_C::handleCpuModeChange(void) { #if BX_SUPPORT_X86_64 - if (BX_CPU_THIS_PTR msr.lma) { + if (BX_CPU_THIS_PTR efer.lma) { if (! BX_CPU_THIS_PTR cr0.get_PE()) { BX_PANIC(("change_cpu_mode: EFER.LMA is set when CR0.PE=0 !")); } @@ -1293,20 +1293,24 @@ void BX_CPU_C::SetCR0(Bit32u val_32) #if BX_SUPPORT_X86_64 if (prev_pg==0 && BX_CPU_THIS_PTR cr0.get_PG()) { - if (BX_CPU_THIS_PTR msr.lme) { + if (BX_CPU_THIS_PTR efer.lme) { if (!BX_CPU_THIS_PTR cr4.get_PAE()) { - BX_ERROR(("SetCR0: attempt to enter x86-64 LONG mode without enabling CR4.PAE !")); + BX_ERROR(("SetCR0: attempt to enter x86-64 long mode without enabling CR4.PAE !")); exception(BX_GP_EXCEPTION, 0, 0); } - BX_CPU_THIS_PTR msr.lma = 1; + if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.l) { + BX_ERROR(("SetCR0: attempt to enter x86-64 long mode with CS.L !")); + exception(BX_GP_EXCEPTION, 0, 0); + } + BX_CPU_THIS_PTR efer.lma = 1; } } else if (prev_pg==1 && ! 
BX_CPU_THIS_PTR cr0.get_PG()) { - if (BX_CPU_THIS_PTR msr.lma) { + if (BX_CPU_THIS_PTR efer.lma) { if (BX_CPU_THIS_PTR dword.rip_upper != 0) { BX_PANIC(("SetCR0: attempt to leave x86-64 LONG mode with RIP upper != 0 !!!")); } - BX_CPU_THIS_PTR msr.lma = 0; + BX_CPU_THIS_PTR efer.lma = 0; } } #endif // #if BX_SUPPORT_X86_64 @@ -1376,11 +1380,11 @@ bx_bool BX_CPU_C::SetCR4(Bit32u val_32) #if BX_SUPPORT_X86_64 // need to GP(0) if LMA=1 and PAE=1->0 - if ((BX_CPU_THIS_PTR msr.lma) + if ((BX_CPU_THIS_PTR efer.lma) && (!(val_32 >> 5) & 1) && (BX_CPU_THIS_PTR cr4.get_PAE())) { - BX_ERROR(("SetCR4: attempt to change PAE when LMA=1")); + BX_ERROR(("SetCR4: attempt to change PAE when EFER.LMA=1")); return 0; } #endif @@ -1711,16 +1715,16 @@ void BX_CPU_C::WRMSR(bxInstruction_c *i) case BX_MSR_EFER: // GPF #0 if lme 0->1 and cr0.pg = 1 // GPF #0 if lme 1->0 and cr0.pg = 1 - if ((BX_CPU_THIS_PTR msr.lme != ((EAX >> 8) & 1)) && + if ((BX_CPU_THIS_PTR efer.lme != ((EAX >> 8) & 1)) && BX_CPU_THIS_PTR cr0.get_PG()) { BX_ERROR(("WRMSR: attempt to change LME when CR0.PG=1")); exception(BX_GP_EXCEPTION, 0, 0); } - BX_CPU_THIS_PTR msr.sce = (EAX >> 0) & 1; - BX_CPU_THIS_PTR msr.lme = (EAX >> 8) & 1; - BX_CPU_THIS_PTR msr.nxe = (EAX >> 11) & 1; - BX_CPU_THIS_PTR msr.ffxsr = (EAX >> 14) & 1; + BX_CPU_THIS_PTR efer.sce = (EAX >> 0) & 1; + BX_CPU_THIS_PTR efer.lme = (EAX >> 8) & 1; + BX_CPU_THIS_PTR efer.nxe = (EAX >> 11) & 1; + BX_CPU_THIS_PTR efer.ffxsr = (EAX >> 14) & 1; return; case BX_MSR_STAR: @@ -1966,13 +1970,13 @@ SYSCALL_LEGACY_MODE: BX_DEBUG(("Execute SYSCALL instruction")); - if (!BX_CPU_THIS_PTR msr.sce) { + if (!BX_CPU_THIS_PTR efer.sce) { exception(BX_UD_EXCEPTION, 0, 0); } invalidate_prefetch_q(); - if (BX_CPU_THIS_PTR msr.lma) + if (BX_CPU_THIS_PTR efer.lma) { RCX = RIP; R11 = read_eflags() & ~(EFlagsRFMask); @@ -2137,7 +2141,7 @@ SYSRET_NON_64BIT_MODE: BX_DEBUG(("Execute SYSRET instruction")); - if (!BX_CPU_THIS_PTR msr.sce) { + if (!BX_CPU_THIS_PTR efer.sce) { 
exception(BX_UD_EXCEPTION, 0, 0); } diff --git a/bochs/cpu/segment_ctrl_pro.cc b/bochs/cpu/segment_ctrl_pro.cc index 9c4d64aa8..283775352 100644 --- a/bochs/cpu/segment_ctrl_pro.cc +++ b/bochs/cpu/segment_ctrl_pro.cc @@ -1,5 +1,5 @@ ///////////////////////////////////////////////////////////////////////// -// $Id: segment_ctrl_pro.cc,v 1.69 2007-03-14 21:15:15 sshwarts Exp $ +// $Id: segment_ctrl_pro.cc,v 1.70 2007-09-10 20:47:08 sshwarts Exp $ ///////////////////////////////////////////////////////////////////////// // // Copyright (C) 2001 MandrakeSoft S.A. @@ -41,7 +41,7 @@ BX_CPU_C::load_seg_reg(bx_segment_reg_t *seg, Bit16u new_value) if ((new_value & 0xfffc) == 0) { /* null selector */ #if BX_SUPPORT_X86_64 // allow SS = 0 in 64 bit mode with cpl != 3 - if (BX_CPU_THIS_PTR msr.lma && CPL != 3) { + if (BX_CPU_THIS_PTR efer.lma && CPL != 3) { seg->selector.index = 0; seg->selector.ti = 0; seg->selector.rpl = 0; diff --git a/bochs/cpu/smm.cc b/bochs/cpu/smm.cc index 185ffb55a..1cf709bb0 100755 --- a/bochs/cpu/smm.cc +++ b/bochs/cpu/smm.cc @@ -1,5 +1,5 @@ ///////////////////////////////////////////////////////////////////////// -// $Id: smm.cc,v 1.25 2007-07-09 15:16:13 sshwarts Exp $ +// $Id: smm.cc,v 1.26 2007-09-10 20:47:08 sshwarts Exp $ ///////////////////////////////////////////////////////////////////////// // // Copyright (c) 2006 Stanislav Shwartsman @@ -142,10 +142,10 @@ void BX_CPU_C::enter_system_management_mode(void) BX_CPU_THIS_PTR cr4.setRegister(0); #endif - // EFER.LME = 0, EFER.LME = 1 + // EFER.LME = 0, EFER.LMA = 0 #if BX_SUPPORT_X86_64 - BX_CPU_THIS_PTR msr.lme = 0; - BX_CPU_THIS_PTR msr.lma = 0; + BX_CPU_THIS_PTR efer.lme = 0; + BX_CPU_THIS_PTR efer.lma = 0; #endif parse_selector(BX_CPU_THIS_PTR smbase >> 4, @@ -359,26 +359,26 @@ bx_bool BX_CPU_C::smram_restore_state(const Bit32u *saved_state) return 0; } - BX_CPU_THIS_PTR msr.sce = (temp_efer >> 0) & 1; - BX_CPU_THIS_PTR msr.lme = (temp_efer >> 8) & 1; - BX_CPU_THIS_PTR msr.lma = (temp_efer 
>> 10) & 1; - BX_CPU_THIS_PTR msr.nxe = (temp_efer >> 11) & 1; - BX_CPU_THIS_PTR msr.ffxsr = (temp_efer >> 14) & 1; + BX_CPU_THIS_PTR efer.sce = (temp_efer >> 0) & 1; + BX_CPU_THIS_PTR efer.lme = (temp_efer >> 8) & 1; + BX_CPU_THIS_PTR efer.lma = (temp_efer >> 10) & 1; + BX_CPU_THIS_PTR efer.nxe = (temp_efer >> 11) & 1; + BX_CPU_THIS_PTR efer.ffxsr = (temp_efer >> 14) & 1; - if (BX_CPU_THIS_PTR msr.lma) { + if (BX_CPU_THIS_PTR efer.lma) { if (temp_eflags & EFlagsVMMask) { BX_PANIC(("SMM restore: If EFER.LMA = 1 => RFLAGS.VM=0 !")); return 0; } - if (!BX_CPU_THIS_PTR cr4.get_PAE() || !pg || !pe || !BX_CPU_THIS_PTR msr.lme) { + if (!BX_CPU_THIS_PTR cr4.get_PAE() || !pg || !pe || !BX_CPU_THIS_PTR efer.lme) { BX_PANIC(("SMM restore: If EFER.LMA = 1 <=> CR4.PAE, CR0.PG, CR0.PE, EFER.LME=1 !")); return 0; } } - if (BX_CPU_THIS_PTR cr4.get_PAE() && pg && pe && BX_CPU_THIS_PTR msr.lme) { - if (! BX_CPU_THIS_PTR msr.lma) { + if (BX_CPU_THIS_PTR cr4.get_PAE() && pg && pe && BX_CPU_THIS_PTR efer.lme) { + if (! BX_CPU_THIS_PTR efer.lma) { BX_PANIC(("SMM restore: If EFER.LMA = 1 <=> CR4.PAE, CR0.PG, CR0.PE, EFER.LME=1 !")); return 0; }