Extract FFXSR support into a separate CPU feature

This commit is contained in:
Stanislav Shwartsman 2011-08-04 19:02:49 +00:00
parent b6e37b818d
commit 2ee0029749
8 changed files with 42 additions and 26 deletions

View File

@@ -900,6 +900,7 @@ public: // for now...
#if BX_SUPPORT_X86_64
bx_efer_t efer;
Bit32u efer_suppmask;
#endif
#if BX_CPU_LEVEL >= 6

View File

@@ -106,6 +106,7 @@ typedef bx_cpuid_t* (*bx_create_cpuid_method)(BX_CPU_C *cpu);
#define BX_CPU_1G_PAGES (1 << 9) /* 1Gb pages support */
#define BX_CPU_PCID (1 << 10) /* PCID pages support */
#define BX_CPU_SMEP (1 << 11) /* SMEP support */
#define BX_CPU_FFXSR (1 << 12) /* EFER.FFXSR support */
// CPUID defines - STD features CPUID[0x00000001].EDX
// ----------------------------

View File

@@ -1213,7 +1213,7 @@ bx_bool BX_CPP_AttrRegparmN(1) BX_CPU_C::SetCR3(bx_address val)
#if BX_SUPPORT_X86_64
bx_bool BX_CPP_AttrRegparmN(1) BX_CPU_C::SetEFER(bx_address val_64)
{
if (val_64 & ~BX_EFER_SUPPORTED_BITS) {
if (val_64 & ~((Bit64u) BX_CPU_THIS_PTR efer_suppmask)) {
BX_ERROR(("SetEFER: attempt to set reserved bits of EFER MSR !"));
return 0;
}
@@ -1228,7 +1228,7 @@ bx_bool BX_CPP_AttrRegparmN(1) BX_CPU_C::SetEFER(bx_address val_64)
return 0;
}
BX_CPU_THIS_PTR efer.set32((val32 & BX_EFER_SUPPORTED_BITS & ~BX_EFER_LMA_MASK)
BX_CPU_THIS_PTR efer.set32((val32 & BX_CPU_THIS_PTR efer_suppmask & ~BX_EFER_LMA_MASK)
| (BX_CPU_THIS_PTR efer.get32() & BX_EFER_LMA_MASK)); // keep LMA untouched
return 1;

View File

@@ -189,6 +189,8 @@ struct bx_dr7_t {
#define BX_EFER_LME_MASK (1 << 8)
#define BX_EFER_LMA_MASK (1 << 10)
#define BX_EFER_NXE_MASK (1 << 11)
#define BX_EFER_SVME_MASK (1 << 12)
#define BX_EFER_LMSLE_MASK (1 << 13)
#define BX_EFER_FFXSR_MASK (1 << 14)
struct bx_efer_t {
@@ -206,10 +208,6 @@ struct bx_efer_t {
BX_CPP_INLINE void set32(Bit32u val) { val32 = val; }
};
#define BX_EFER_SUPPORTED_BITS \
((Bit64u) (BX_EFER_SCE_MASK | BX_EFER_LME_MASK | \
BX_EFER_LMA_MASK | BX_EFER_NXE_MASK | BX_EFER_FFXSR_MASK))
#endif
#if BX_CPU_LEVEL >= 6

View File

@@ -840,6 +840,8 @@ void bx_generic_cpuid_t::init_cpu_extensions_bitmask(void)
features_bitmask |= BX_CPU_SMEP;
#if BX_SUPPORT_X86_64
features_bitmask |= BX_CPU_FFXSR;
static bx_bool pcid_enabled = SIM->get_param_bool(BXPN_CPUID_PCID)->get();
if (pcid_enabled)
features_bitmask |= BX_CPU_PCID;
@@ -1156,24 +1158,34 @@ Bit32u bx_generic_cpuid_t::get_std2_cpuid_features(void) const
Bit32u bx_generic_cpuid_t::get_ext2_cpuid_features(void) const
{
// ECX:
// [0:0] LAHF/SAHF instructions support in 64-bit mode
// [1:1] CMP_Legacy: Core multi-processing legacy mode (AMD)
// [2:2] SVM: Secure Virtual Machine (AMD)
// [3:3] Extended APIC Space
// [4:4] AltMovCR8: LOCK MOV CR0 means MOV CR8
// [5:5] LZCNT: LZCNT instruction support
// [6:6] SSE4A: SSE4A Instructions support (deprecated?)
// [7:7] Misaligned SSE support
// [8:8] PREFETCHW: PREFETCHW instruction support
// [9:9] OSVW: OS visible workarounds (AMD)
// [11:10] reserved
// [12:12] SKINIT support
// [13:13] WDT: Watchdog timer support
// [31:14] reserved
// [0:0] LAHF/SAHF instructions support in 64-bit mode
// [1:1] CMP_Legacy: Core multi-processing legacy mode (AMD)
// [2:2] SVM: Secure Virtual Machine (AMD)
// [3:3] Extended APIC Space
// [4:4] AltMovCR8: LOCK MOV CR0 means MOV CR8
// [5:5] LZCNT: LZCNT instruction support
// [6:6] SSE4A: SSE4A Instructions support (deprecated?)
// [7:7] Misaligned SSE support
// [8:8] PREFETCHW: PREFETCHW instruction support
// [9:9] OSVW: OS visible workarounds (AMD)
// [10:10] IBS: Instruction based sampling
// [11:11] XOP: Extended Operations Support and XOP Prefix
// [12:12] SKINIT support
// [13:13] WDT: Watchdog timer support
// [14:14] reserved
// [15:15] LWP: Light weight profiling
// [16:16] FMA4: Four-operand FMA instructions support
// [18:17] reserved
// [19:19] NodeId: Indicates support for NodeId MSR (0xc001100c)
// [20:20] reserved
// [21:21] TBM: trailing bit manipulation instruction support
// [22:22] Topology extensions support
// [31:23] reserved
Bit32u features = 0;
#if BX_SUPPORT_X86_64
features |= BX_CPUID_EXT2_LAHF_SAHF;
features |= BX_CPUID_EXT2_LAHF_SAHF | BX_CPUID_EXT2_PREFETCHW;
#endif
#if BX_SUPPORT_MISALIGNED_SSE
features |= BX_CPUID_EXT2_MISALIGNED_SSE;

View File

@@ -950,6 +950,10 @@ void BX_CPU_C::reset(unsigned source)
#endif
#if BX_SUPPORT_X86_64
BX_CPU_THIS_PTR efer.set32(0);
BX_CPU_THIS_PTR efer_suppmask = (BX_EFER_SCE_MASK | BX_EFER_LME_MASK |
BX_EFER_LMA_MASK | BX_EFER_NXE_MASK);
if (BX_CPUID_SUPPORT_CPU_EXTENSION(BX_CPU_FFXSR))
BX_CPU_THIS_PTR efer_suppmask |= BX_EFER_FFXSR_MASK;
BX_CPU_THIS_PTR msr.star = 0;
BX_CPU_THIS_PTR msr.lstar = 0;

View File

@@ -569,12 +569,12 @@ bx_bool BX_CPU_C::smram_restore_state(const Bit32u *saved_state)
return 0;
}
if (temp_efer & ~BX_EFER_SUPPORTED_BITS) {
BX_PANIC(("SMM restore: Attemp to set EFER reserved bits: 0x%08x !", temp_efer));
if (temp_efer & ~((Bit64u) BX_CPU_THIS_PTR efer_suppmask)) {
BX_PANIC(("SMM restore: Attempt to set EFER reserved bits: 0x%08x !", temp_efer));
return 0;
}
BX_CPU_THIS_PTR efer.set32(temp_efer & BX_EFER_SUPPORTED_BITS);
BX_CPU_THIS_PTR efer.set32(temp_efer & BX_CPU_THIS_PTR efer_suppmask);
if (BX_CPU_THIS_PTR efer.get_LMA()) {
if (temp_eflags & EFlagsVMMask) {

View File

@@ -807,7 +807,7 @@ VMX_error_code BX_CPU_C::VMenterLoadCheckHostState(void)
#if BX_SUPPORT_VMX >= 2
if (vmexit_ctrls & VMX_VMEXIT_CTRL1_LOAD_EFER_MSR) {
host_state->efer_msr = VMread64(VMCS_64BIT_HOST_IA32_EFER);
if (host_state->efer_msr & ~BX_EFER_SUPPORTED_BITS) {
if (host_state->efer_msr & ~((Bit64u) BX_CPU_THIS_PTR efer_suppmask)) {
BX_ERROR(("VMFAIL: VMCS host EFER reserved bits set !"));
return VMXERR_VMENTRY_INVALID_VM_HOST_STATE_FIELD;
}
@@ -1320,7 +1320,7 @@ Bit32u BX_CPU_C::VMenterLoadCheckGuestState(Bit64u *qualification)
#if BX_SUPPORT_VMX >= 2 && BX_SUPPORT_X86_64
if (vmentry_ctrls & VMX_VMENTRY_CTRL1_LOAD_EFER_MSR) {
guest.efer_msr = VMread64(VMCS_64BIT_GUEST_IA32_EFER);
if (guest.efer_msr & ~BX_EFER_SUPPORTED_BITS) {
if (guest.efer_msr & ~((Bit64u) BX_CPU_THIS_PTR efer_suppmask)) {
BX_ERROR(("VMENTER FAIL: VMCS guest EFER reserved bits set !"));
return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
}