/////////////////////////////////////////////////////////////////////////
// $Id$
/////////////////////////////////////////////////////////////////////////
//
//   Copyright (c) 2009-2015 Stanislav Shwartsman
//          Written by Stanislav Shwartsman [sshwarts at sourceforge net]
//
//  This library is free software; you can redistribute it and/or
//  modify it under the terms of the GNU Lesser General Public
//  License as published by the Free Software Foundation; either
//  version 2 of the License, or (at your option) any later version.
//
//  This library is distributed in the hope that it will be useful,
//  but WITHOUT ANY WARRANTY; without even the implied warranty of
//  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
//  Lesser General Public License for more details.
//
//  You should have received a copy of the GNU Lesser General Public
//  License along with this library; if not, write to the Free Software
//  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
//
/////////////////////////////////////////////////////////////////////////

#define NEED_CPU_REG_SHORTCUTS 1
#include "bochs.h"
#include "cpu.h"
#define LOG_THIS BX_CPU_THIS_PTR

#include "iodev/iodev.h"

#if BX_SUPPORT_VMX

extern VMCS_Mapping vmcs_map;

#if BX_SUPPORT_VMX >= 2
extern bx_bool isValidMSR_PAT(Bit64u pat_msr);
#endif

////////////////////////////////////////////////////////////
// VMEXIT reasons for BX prints
////////////////////////////////////////////////////////////

static const char *VMX_vmexit_reason_name[] =
{
  /* 0 */  "Exception or NMI",
  /* 1 */  "External Interrupt",
  /* 2 */  "Triple Fault",
  /* 3 */  "INIT",
  /* 4 */  "SIPI",
  /* 5 */  "I/O SMI (SMM Vmexit)",
  /* 6 */  "SMI (SMM Vmexit)",
  /* 7 */  "Interrupt Window Exiting",
  /* 8 */  "NMI Window Exiting",
  /* 9 */  "Task Switch",
  /* 10 */ "CPUID",
  /* 11 */ "GETSEC",
  /* 12 */ "HLT",
  /* 13 */ "INVD",
  /* 14 */ "INVLPG",
  /* 15 */ "RDPMC",
  /* 16 */ "RDTSC",
  /* 17 */ "RSM",
  /* 18 */ "VMCALL",
  /* 19 */ "VMCLEAR",
  /* 20 */ "VMLAUNCH",
  /* 21 */ "VMPTRLD",
  /* 22 */ "VMPTRST",
  /* 23 */ "VMREAD",
  /* 24 */ "VMRESUME",
  /* 25 */ "VMWRITE",
  /* 26 */ "VMXOFF",
  /* 27 */ "VMXON",
  /* 28 */ "CR Access",
  /* 29 */ "DR Access",
  /* 30 */ "I/O Instruction",
  /* 31 */ "RDMSR",
  /* 32 */ "WRMSR",
  /* 33 */ "VMEntry failure due to invalid guest state",
  /* 34 */ "VMEntry failure due to MSR loading",
  /* 35 */ "Reserved35",
  /* 36 */ "MWAIT",
  /* 37 */ "MTF (Monitor Trap Flag)",
  /* 38 */ "Reserved38",
  /* 39 */ "MONITOR",
  /* 40 */ "PAUSE",
  /* 41 */ "VMEntry failure due to machine check",
  /* 42 */ "Reserved42",
  /* 43 */ "TPR Below Threshold",
  /* 44 */ "APIC Access",
  /* 45 */ "Virtualized EOI",
  /* 46 */ "GDTR/IDTR Access",
  /* 47 */ "LDTR/TR Access",
  /* 48 */ "EPT Violation",
  /* 49 */ "EPT Misconfiguration",
  /* 50 */ "INVEPT",
  /* 51 */ "RDTSCP",
  /* 52 */ "VMX preemption timer expired",
  /* 53 */ "INVVPID",
  /* 54 */ "WBINVD",
  /* 55 */ "XSETBV",
  /* 56 */ "APIC Write Trap",
  /* 57 */ "RDRAND",
  /* 58 */ "INVPCID",
  /* 59 */ "VMFUNC",
  /* 60 */ "ENCLS",
  /* 61 */ "RDSEED",
  /* 62 */ "PML Log Full",
  /* 63 */ "XSAVES",
  /* 64 */ "XRSTORS",
};
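
// The table above maps the basic VMEXIT reason codes (0..64) directly to
// printable names. A minimal, illustrative decode for logging (the
// VMX_VMEXIT_LAST_REASON bound below is an assumption derived from the size
// of the table, not a constant defined elsewhere in this file):
//
//   const unsigned VMX_VMEXIT_LAST_REASON = 64;
//   const char *name = (reason <= VMX_VMEXIT_LAST_REASON) ?
//                          VMX_vmexit_reason_name[reason] : "(unknown)";
//   BX_DEBUG(("VMEXIT reason %u - %s", reason, name));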

////////////////////////////////////////////////////////////
// VMCS access
////////////////////////////////////////////////////////////

void BX_CPU_C::set_VMCSPTR(Bit64u vmxptr)
{
  BX_CPU_THIS_PTR vmcsptr = vmxptr;

  if (vmxptr != BX_INVALID_VMCSPTR) {
    BX_CPU_THIS_PTR vmcshostptr = BX_CPU_THIS_PTR getHostMemAddr(vmxptr, BX_WRITE);
#if BX_SUPPORT_MEMTYPE
    BX_CPU_THIS_PTR vmcs_memtype = MEMTYPE(resolve_memtype(vmxptr));
#endif
  }
  else {
    BX_CPU_THIS_PTR vmcshostptr = 0;
#if BX_SUPPORT_MEMTYPE
    BX_CPU_THIS_PTR vmcs_memtype = BX_MEMTYPE_UC;
#endif
  }
}
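
// Note: when a valid VMCS pointer is loaded, set_VMCSPTR() also caches a
// direct host pointer to the VMCS region (vmcshostptr). The VMread/VMwrite
// helpers below use that pointer as a fast path and fall back to
// access_read_physical()/access_write_physical() only when it is not
// available.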

Bit16u BX_CPP_AttrRegparmN(1) BX_CPU_C::VMread16(unsigned encoding)
{
  Bit16u field;

  unsigned offset = BX_CPU_THIS_PTR vmcs_map->vmcs_field_offset(encoding);
  if (offset >= VMX_VMCS_AREA_SIZE)
    BX_PANIC(("VMread16: can't access encoding 0x%08x, offset=0x%x", encoding, offset));
  bx_phy_address pAddr = BX_CPU_THIS_PTR vmcsptr + offset;

  if (BX_CPU_THIS_PTR vmcshostptr) {
    Bit16u *hostAddr = (Bit16u*) (BX_CPU_THIS_PTR vmcshostptr | offset);
    ReadHostWordFromLittleEndian(hostAddr, field);
  }
  else {
    access_read_physical(pAddr, 2, (Bit8u*)(&field));
  }

  BX_NOTIFY_PHY_MEMORY_ACCESS(pAddr, 2, MEMTYPE(BX_CPU_THIS_PTR vmcs_memtype), BX_READ, BX_VMCS_ACCESS, (Bit8u*)(&field));

  return field;
}

// write 16-bit value into VMCS 16-bit field
void BX_CPP_AttrRegparmN(2) BX_CPU_C::VMwrite16(unsigned encoding, Bit16u val_16)
{
  unsigned offset = BX_CPU_THIS_PTR vmcs_map->vmcs_field_offset(encoding);
  if (offset >= VMX_VMCS_AREA_SIZE)
    BX_PANIC(("VMwrite16: can't access encoding 0x%08x, offset=0x%x", encoding, offset));
  bx_phy_address pAddr = BX_CPU_THIS_PTR vmcsptr + offset;

  if (BX_CPU_THIS_PTR vmcshostptr) {
    Bit16u *hostAddr = (Bit16u*) (BX_CPU_THIS_PTR vmcshostptr | offset);
    pageWriteStampTable.decWriteStamp(pAddr, 2);
    WriteHostWordToLittleEndian(hostAddr, val_16);
  }
  else {
    access_write_physical(pAddr, 2, (Bit8u*)(&val_16));
  }

  BX_NOTIFY_PHY_MEMORY_ACCESS(pAddr, 2, MEMTYPE(BX_CPU_THIS_PTR vmcs_memtype), BX_WRITE, BX_VMCS_ACCESS, (Bit8u*)(&val_16));
}

Bit32u BX_CPP_AttrRegparmN(1) BX_CPU_C::VMread32(unsigned encoding)
{
  Bit32u field;

  unsigned offset = BX_CPU_THIS_PTR vmcs_map->vmcs_field_offset(encoding);
  if (offset >= VMX_VMCS_AREA_SIZE)
    BX_PANIC(("VMread32: can't access encoding 0x%08x, offset=0x%x", encoding, offset));
  bx_phy_address pAddr = BX_CPU_THIS_PTR vmcsptr + offset;

  if (BX_CPU_THIS_PTR vmcshostptr) {
    Bit32u *hostAddr = (Bit32u*) (BX_CPU_THIS_PTR vmcshostptr | offset);
    ReadHostDWordFromLittleEndian(hostAddr, field);
  }
  else {
    access_read_physical(pAddr, 4, (Bit8u*)(&field));
  }

  BX_NOTIFY_PHY_MEMORY_ACCESS(pAddr, 4, MEMTYPE(BX_CPU_THIS_PTR vmcs_memtype), BX_READ, BX_VMCS_ACCESS, (Bit8u*)(&field));

  return field;
}

// write 32-bit value into VMCS field
void BX_CPP_AttrRegparmN(2) BX_CPU_C::VMwrite32(unsigned encoding, Bit32u val_32)
{
  unsigned offset = BX_CPU_THIS_PTR vmcs_map->vmcs_field_offset(encoding);
  if (offset >= VMX_VMCS_AREA_SIZE)
    BX_PANIC(("VMwrite32: can't access encoding 0x%08x, offset=0x%x", encoding, offset));
  bx_phy_address pAddr = BX_CPU_THIS_PTR vmcsptr + offset;

  if (BX_CPU_THIS_PTR vmcshostptr) {
    Bit32u *hostAddr = (Bit32u*) (BX_CPU_THIS_PTR vmcshostptr | offset);
    pageWriteStampTable.decWriteStamp(pAddr, 4);
    WriteHostDWordToLittleEndian(hostAddr, val_32);
  }
  else {
    access_write_physical(pAddr, 4, (Bit8u*)(&val_32));
  }

  BX_NOTIFY_PHY_MEMORY_ACCESS(pAddr, 4, MEMTYPE(BX_CPU_THIS_PTR vmcs_memtype), BX_WRITE, BX_VMCS_ACCESS, (Bit8u*)(&val_32));
}

Bit64u BX_CPP_AttrRegparmN(1) BX_CPU_C::VMread64(unsigned encoding)
{
  BX_ASSERT(!IS_VMCS_FIELD_HI(encoding));

  Bit64u field;

  unsigned offset = BX_CPU_THIS_PTR vmcs_map->vmcs_field_offset(encoding);
  if (offset >= VMX_VMCS_AREA_SIZE)
    BX_PANIC(("VMread64: can't access encoding 0x%08x, offset=0x%x", encoding, offset));
  bx_phy_address pAddr = BX_CPU_THIS_PTR vmcsptr + offset;

  if (BX_CPU_THIS_PTR vmcshostptr) {
    Bit64u *hostAddr = (Bit64u*) (BX_CPU_THIS_PTR vmcshostptr | offset);
    ReadHostQWordFromLittleEndian(hostAddr, field);
  }
  else {
    access_read_physical(pAddr, 8, (Bit8u*)(&field));
  }

  BX_NOTIFY_PHY_MEMORY_ACCESS(pAddr, 8, MEMTYPE(BX_CPU_THIS_PTR vmcs_memtype), BX_READ, BX_VMCS_ACCESS, (Bit8u*)(&field));

  return field;
}

// write 64-bit value into VMCS field
void BX_CPP_AttrRegparmN(2) BX_CPU_C::VMwrite64(unsigned encoding, Bit64u val_64)
{
  BX_ASSERT(!IS_VMCS_FIELD_HI(encoding));

  unsigned offset = BX_CPU_THIS_PTR vmcs_map->vmcs_field_offset(encoding);
  if (offset >= VMX_VMCS_AREA_SIZE)
    BX_PANIC(("VMwrite64: can't access encoding 0x%08x, offset=0x%x", encoding, offset));
  bx_phy_address pAddr = BX_CPU_THIS_PTR vmcsptr + offset;

  if (BX_CPU_THIS_PTR vmcshostptr) {
    Bit64u *hostAddr = (Bit64u*) (BX_CPU_THIS_PTR vmcshostptr | offset);
    pageWriteStampTable.decWriteStamp(pAddr, 8);
    WriteHostQWordToLittleEndian(hostAddr, val_64);
  }
  else {
    access_write_physical(pAddr, 8, (Bit8u*)(&val_64));
  }

  BX_NOTIFY_PHY_MEMORY_ACCESS(pAddr, 8, MEMTYPE(BX_CPU_THIS_PTR vmcs_memtype), BX_WRITE, BX_VMCS_ACCESS, (Bit8u*)(&val_64));
}

#if BX_SUPPORT_X86_64
BX_CPP_INLINE bx_address BX_CPP_AttrRegparmN(1) BX_CPU_C::VMread_natural(unsigned encoding)
{
  return VMread64(encoding);
}

void BX_CPP_AttrRegparmN(2) BX_CPU_C::VMwrite_natural(unsigned encoding, bx_address val)
{
  VMwrite64(encoding, val);
}
#else
BX_CPP_INLINE bx_address BX_CPP_AttrRegparmN(1) BX_CPU_C::VMread_natural(unsigned encoding)
{
  return VMread32(encoding);
}

void BX_CPP_AttrRegparmN(2) BX_CPU_C::VMwrite_natural(unsigned encoding, bx_address val)
{
  VMwrite32(encoding, val);
}
#endif
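
// VMread_natural()/VMwrite_natural() access natural-width VMCS fields: they
// resolve to the 64-bit accessors when the emulator is built with x86-64
// support and to the 32-bit accessors otherwise. Illustrative usage with a
// field encoding that is already used later in this file:
//
//   bx_address cr0_mask = VMread_natural(VMCS_CONTROL_CR0_GUEST_HOST_MASK);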

////////////////////////////////////////////////////////////
// Shadow VMCS access
////////////////////////////////////////////////////////////

#if BX_SUPPORT_VMX >= 2

Bit16u BX_CPP_AttrRegparmN(1) BX_CPU_C::VMread16_Shadow(unsigned encoding)
{
  unsigned offset = BX_CPU_THIS_PTR vmcs_map->vmcs_field_offset(encoding);
  if (offset >= VMX_VMCS_AREA_SIZE)
    BX_PANIC(("VMread16_Shadow: can't access encoding 0x%08x, offset=0x%x", encoding, offset));

  bx_phy_address pAddr = BX_CPU_THIS_PTR vmcs.vmcs_linkptr + offset;
  Bit16u field;
  access_read_physical(pAddr, 2, (Bit8u*)(&field));
  BX_NOTIFY_PHY_MEMORY_ACCESS(pAddr, 2, MEMTYPE(resolve_memtype(pAddr)), BX_READ, BX_SHADOW_VMCS_ACCESS, (Bit8u*)(&field));

  return field;
}

// write 16-bit value into shadow VMCS 16-bit field
void BX_CPP_AttrRegparmN(2) BX_CPU_C::VMwrite16_Shadow(unsigned encoding, Bit16u val_16)
{
  unsigned offset = BX_CPU_THIS_PTR vmcs_map->vmcs_field_offset(encoding);
  if (offset >= VMX_VMCS_AREA_SIZE)
    BX_PANIC(("VMwrite16_Shadow: can't access encoding 0x%08x, offset=0x%x", encoding, offset));

  bx_phy_address pAddr = BX_CPU_THIS_PTR vmcs.vmcs_linkptr + offset;
  access_write_physical(pAddr, 2, (Bit8u*)(&val_16));
  BX_NOTIFY_PHY_MEMORY_ACCESS(pAddr, 2, MEMTYPE(resolve_memtype(pAddr)), BX_WRITE, BX_SHADOW_VMCS_ACCESS, (Bit8u*)(&val_16));
}

Bit32u BX_CPP_AttrRegparmN(1) BX_CPU_C::VMread32_Shadow(unsigned encoding)
{
  unsigned offset = BX_CPU_THIS_PTR vmcs_map->vmcs_field_offset(encoding);
  if (offset >= VMX_VMCS_AREA_SIZE)
    BX_PANIC(("VMread32_Shadow: can't access encoding 0x%08x, offset=0x%x", encoding, offset));

  bx_phy_address pAddr = BX_CPU_THIS_PTR vmcs.vmcs_linkptr + offset;
  Bit32u field;
  access_read_physical(pAddr, 4, (Bit8u*)(&field));
  BX_NOTIFY_PHY_MEMORY_ACCESS(pAddr, 4, MEMTYPE(resolve_memtype(pAddr)), BX_READ, BX_SHADOW_VMCS_ACCESS, (Bit8u*)(&field));

  return field;
}

// write 32-bit value into shadow VMCS field
void BX_CPP_AttrRegparmN(2) BX_CPU_C::VMwrite32_Shadow(unsigned encoding, Bit32u val_32)
{
  unsigned offset = BX_CPU_THIS_PTR vmcs_map->vmcs_field_offset(encoding);
  if (offset >= VMX_VMCS_AREA_SIZE)
    BX_PANIC(("VMwrite32_Shadow: can't access encoding 0x%08x, offset=0x%x", encoding, offset));

  bx_phy_address pAddr = BX_CPU_THIS_PTR vmcs.vmcs_linkptr + offset;
  access_write_physical(pAddr, 4, (Bit8u*)(&val_32));
  BX_NOTIFY_PHY_MEMORY_ACCESS(pAddr, 4, MEMTYPE(resolve_memtype(pAddr)), BX_WRITE, BX_SHADOW_VMCS_ACCESS, (Bit8u*)(&val_32));
}

Bit64u BX_CPP_AttrRegparmN(1) BX_CPU_C::VMread64_Shadow(unsigned encoding)
{
  BX_ASSERT(!IS_VMCS_FIELD_HI(encoding));

  unsigned offset = BX_CPU_THIS_PTR vmcs_map->vmcs_field_offset(encoding);
  if (offset >= VMX_VMCS_AREA_SIZE)
    BX_PANIC(("VMread64_Shadow: can't access encoding 0x%08x, offset=0x%x", encoding, offset));

  bx_phy_address pAddr = BX_CPU_THIS_PTR vmcs.vmcs_linkptr + offset;
  Bit64u field;
  access_read_physical(pAddr, 8, (Bit8u*)(&field));
  BX_NOTIFY_PHY_MEMORY_ACCESS(pAddr, 8, MEMTYPE(resolve_memtype(pAddr)), BX_READ, BX_SHADOW_VMCS_ACCESS, (Bit8u*)(&field));

  return field;
}

// write 64-bit value into shadow VMCS field
void BX_CPP_AttrRegparmN(2) BX_CPU_C::VMwrite64_Shadow(unsigned encoding, Bit64u val_64)
{
  BX_ASSERT(!IS_VMCS_FIELD_HI(encoding));

  unsigned offset = BX_CPU_THIS_PTR vmcs_map->vmcs_field_offset(encoding);
  if (offset >= VMX_VMCS_AREA_SIZE)
    BX_PANIC(("VMwrite64_Shadow: can't access encoding 0x%08x, offset=0x%x", encoding, offset));

  bx_phy_address pAddr = BX_CPU_THIS_PTR vmcs.vmcs_linkptr + offset;
  access_write_physical(pAddr, 8, (Bit8u*)(&val_64));
  BX_NOTIFY_PHY_MEMORY_ACCESS(pAddr, 8, MEMTYPE(resolve_memtype(pAddr)), BX_WRITE, BX_SHADOW_VMCS_ACCESS, (Bit8u*)(&val_64));
}

#endif
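
// The *_Shadow accessors above mirror the regular VMread/VMwrite helpers but
// target the shadow VMCS referenced by the VMCS link pointer
// (vmcs.vmcs_linkptr). They always go through physical memory accesses and
// are compiled in only when BX_SUPPORT_VMX >= 2.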

////////////////////////////////////////////////////////////
// VMfail/VMsucceed
////////////////////////////////////////////////////////////

BX_CPP_INLINE void BX_CPU_C::VMfail(Bit32u error_code)
{
  clearEFlagsOSZAPC();

  if ((BX_CPU_THIS_PTR vmcsptr != BX_INVALID_VMCSPTR)) { // executed only if there is a current VMCS
    assert_ZF();
    VMwrite32(VMCS_32BIT_INSTRUCTION_ERROR, error_code);
  }
  else {
    assert_CF();
  }
}
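
// VMfail() follows the VMX instruction error reporting convention: with a
// current VMCS loaded it sets ZF and records the error code in the
// VM-instruction error field ("VMfailValid"); without a current VMCS it can
// only set CF ("VMfailInvalid"). The remaining arithmetic flags are cleared
// in both cases.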

void BX_CPU_C::VMabort(VMX_vmabort_code error_code)
{
  VMwrite32(VMCS_VMX_ABORT_FIELD_ENCODING, (Bit32u) error_code);

#if BX_SUPPORT_VMX >= 2
  // Deactivate VMX preemption timer
  BX_CPU_THIS_PTR lapic.deactivate_vmx_preemption_timer();
#endif

  shutdown();
}

Bit32u BX_CPU_C::VMXReadRevisionID(bx_phy_address pAddr)
{
  Bit32u revision;
  access_read_physical(pAddr + VMCS_REVISION_ID_FIELD_ADDR, 4, &revision);
  BX_NOTIFY_PHY_MEMORY_ACCESS(pAddr + VMCS_REVISION_ID_FIELD_ADDR, 4, MEMTYPE(BX_CPU_THIS_PTR vmcs_memtype),
      BX_READ, BX_VMCS_ACCESS, (Bit8u*)(&revision));

  return revision;
}

#if BX_SUPPORT_VMX >= 2
bx_bool BX_CPU_C::is_eptptr_valid(Bit64u eptptr)
{
  // [2:0] EPT paging-structure memory type
  //       0 = Uncacheable (UC)
  //       6 = Write-back (WB)
  Bit32u memtype = eptptr & 7;
  if (memtype != BX_MEMTYPE_UC && memtype != BX_MEMTYPE_WB) return 0;

  // [5:3] This value is 1 less than the EPT page-walk length
  Bit32u walk_length = (eptptr >> 3) & 7;
  if (walk_length != 3) return 0;

  // [6] EPT A/D Enable
  if (! BX_SUPPORT_VMX_EXTENSION(BX_VMX_EPT_ACCESS_DIRTY)) {
    if (eptptr & 0x40) {
      BX_ERROR(("is_eptptr_valid: EPTPTR A/D enabled when not supported by CPU"));
      return 0;
    }
  }

#define BX_EPTPTR_RESERVED_BITS 0xf80 /* bits 11:7 are reserved */
  if (eptptr & BX_EPTPTR_RESERVED_BITS) {
    BX_ERROR(("is_eptptr_valid: EPTPTR reserved bits set"));
    return 0;
  }

  if (! IsValidPhyAddr(eptptr)) return 0;
  return 1;
}
#endif
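
// A sketch of an EPTP value that would pass is_eptptr_valid(), assuming a
// hypothetical page-aligned physical address ept_pml4_base: write-back
// memory type, a 4-level page walk encoded as 3 in bits 5:3, A/D logging
// disabled and reserved bits 11:7 clear:
//
//   Bit64u eptptr = ept_pml4_base | (3 << 3) | BX_MEMTYPE_WB;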

BX_CPP_INLINE Bit32u rotate_r(Bit32u val_32)
{
  return (val_32 >> 8) | (val_32 << 24);
}

BX_CPP_INLINE Bit32u rotate_l(Bit32u val_32)
{
  return (val_32 << 8) | (val_32 >> 24);
}

BX_CPP_INLINE Bit32u vmx_from_ar_byte_rd(Bit32u ar_byte)
{
  return rotate_r(ar_byte);
}

BX_CPP_INLINE Bit32u vmx_from_ar_byte_wr(Bit32u ar_byte)
{
  return rotate_l(ar_byte);
}
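
// rotate_r()/rotate_l() are exact inverses of each other (8-bit rotations in
// opposite directions); vmx_from_ar_byte_rd()/vmx_from_ar_byte_wr() use them
// to translate segment access-rights values between the layout kept in the
// VMCS fields and the byte-rotated form expected by their callers.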

////////////////////////////////////////////////////////////
// VMenter
////////////////////////////////////////////////////////////

extern struct BxExceptionInfo exceptions_info[];

#define VMENTRY_INJECTING_EVENT(vmentry_interr_info) (vmentry_interr_info & 0x80000000)

#define VMX_CHECKS_USE_MSR_VMX_PINBASED_CTRLS_LO \
  ((BX_SUPPORT_VMX >= 2) ? VMX_MSR_VMX_TRUE_PINBASED_CTRLS_LO : VMX_MSR_VMX_PINBASED_CTRLS_LO)
#define VMX_CHECKS_USE_MSR_VMX_PINBASED_CTRLS_HI \
  ((BX_SUPPORT_VMX >= 2) ? VMX_MSR_VMX_TRUE_PINBASED_CTRLS_HI : VMX_MSR_VMX_PINBASED_CTRLS_HI)

#define VMX_CHECKS_USE_MSR_VMX_PROCBASED_CTRLS_LO \
  ((BX_SUPPORT_VMX >= 2) ? VMX_MSR_VMX_TRUE_PROCBASED_CTRLS_LO : VMX_MSR_VMX_PROCBASED_CTRLS_LO)
#define VMX_CHECKS_USE_MSR_VMX_PROCBASED_CTRLS_HI \
  ((BX_SUPPORT_VMX >= 2) ? VMX_MSR_VMX_TRUE_PROCBASED_CTRLS_HI : VMX_MSR_VMX_PROCBASED_CTRLS_HI)

#define VMX_CHECKS_USE_MSR_VMX_VMEXIT_CTRLS_LO \
  ((BX_SUPPORT_VMX >= 2) ? VMX_MSR_VMX_TRUE_VMEXIT_CTRLS_LO : VMX_MSR_VMX_VMEXIT_CTRLS_LO)
#define VMX_CHECKS_USE_MSR_VMX_VMEXIT_CTRLS_HI \
  ((BX_SUPPORT_VMX >= 2) ? VMX_MSR_VMX_TRUE_VMEXIT_CTRLS_HI : VMX_MSR_VMX_VMEXIT_CTRLS_HI)

#define VMX_CHECKS_USE_MSR_VMX_VMENTRY_CTRLS_LO \
  ((BX_SUPPORT_VMX >= 2) ? VMX_MSR_VMX_TRUE_VMENTRY_CTRLS_LO : VMX_MSR_VMX_VMENTRY_CTRLS_LO)
#define VMX_CHECKS_USE_MSR_VMX_VMENTRY_CTRLS_HI \
  ((BX_SUPPORT_VMX >= 2) ? VMX_MSR_VMX_TRUE_VMENTRY_CTRLS_HI : VMX_MSR_VMX_VMENTRY_CTRLS_HI)
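
// With BX_SUPPORT_VMX >= 2 the VM-entry consistency checks below consult the
// "TRUE" capability MSRs (VMX_MSR_VMX_TRUE_*), which may allow some
// default1-class control bits to be cleared; otherwise the legacy capability
// MSRs are used. Each *_LO constant holds the allowed 0-settings (bits that
// must be 1 in the corresponding control field) and each *_HI constant the
// allowed 1-settings (bits that may be 1).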

VMX_error_code BX_CPU_C::VMenterLoadCheckVmControls(void)
{
  VMCS_CACHE *vm = &BX_CPU_THIS_PTR vmcs;

  //
  // Load VM-execution control fields to VMCS Cache
  //

  vm->vmexec_ctrls1 = VMread32(VMCS_32BIT_CONTROL_PIN_BASED_EXEC_CONTROLS);
  vm->vmexec_ctrls2 = VMread32(VMCS_32BIT_CONTROL_PROCESSOR_BASED_VMEXEC_CONTROLS);
  if (VMEXIT(VMX_VM_EXEC_CTRL2_SECONDARY_CONTROLS))
    vm->vmexec_ctrls3 = VMread32(VMCS_32BIT_CONTROL_SECONDARY_VMEXEC_CONTROLS);
  else
    vm->vmexec_ctrls3 = 0;
  vm->vm_exceptions_bitmap = VMread32(VMCS_32BIT_CONTROL_EXECUTION_BITMAP);
  vm->vm_pf_mask = VMread32(VMCS_32BIT_CONTROL_PAGE_FAULT_ERR_CODE_MASK);
  vm->vm_pf_match = VMread32(VMCS_32BIT_CONTROL_PAGE_FAULT_ERR_CODE_MATCH);
  vm->vm_cr0_mask = VMread_natural(VMCS_CONTROL_CR0_GUEST_HOST_MASK);
  vm->vm_cr4_mask = VMread_natural(VMCS_CONTROL_CR4_GUEST_HOST_MASK);
  vm->vm_cr0_read_shadow = VMread_natural(VMCS_CONTROL_CR0_READ_SHADOW);
  vm->vm_cr4_read_shadow = VMread_natural(VMCS_CONTROL_CR4_READ_SHADOW);

  vm->vm_cr3_target_cnt = VMread32(VMCS_32BIT_CONTROL_CR3_TARGET_COUNT);
  for (int n=0; n<VMX_CR3_TARGET_MAX_CNT; n++)
    vm->vm_cr3_target_value[n] = VMread_natural(VMCS_CR3_TARGET0 + 2*n);

  //
  // Check VM-execution control fields
  //

  if (~vm->vmexec_ctrls1 & VMX_CHECKS_USE_MSR_VMX_PINBASED_CTRLS_LO) {
    BX_ERROR(("VMFAIL: VMCS EXEC CTRL: VMX pin-based controls allowed 0-settings"));
    return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
  }
  if (vm->vmexec_ctrls1 & ~VMX_CHECKS_USE_MSR_VMX_PINBASED_CTRLS_HI) {
    BX_ERROR(("VMFAIL: VMCS EXEC CTRL: VMX pin-based controls allowed 1-settings"));
    return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
  }

  if (~vm->vmexec_ctrls2 & VMX_CHECKS_USE_MSR_VMX_PROCBASED_CTRLS_LO) {
    BX_ERROR(("VMFAIL: VMCS EXEC CTRL: VMX proc-based controls allowed 0-settings"));
    return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
  }
  if (vm->vmexec_ctrls2 & ~VMX_CHECKS_USE_MSR_VMX_PROCBASED_CTRLS_HI) {
    BX_ERROR(("VMFAIL: VMCS EXEC CTRL: VMX proc-based controls allowed 1-settings"));
    return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
  }

  if (~vm->vmexec_ctrls3 & VMX_MSR_VMX_PROCBASED_CTRLS2_LO) {
    BX_ERROR(("VMFAIL: VMCS EXEC CTRL: VMX secondary proc-based controls allowed 0-settings"));
    return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
  }
  if (vm->vmexec_ctrls3 & ~VMX_MSR_VMX_PROCBASED_CTRLS2_HI) {
    BX_ERROR(("VMFAIL: VMCS EXEC CTRL: VMX secondary proc-based controls allowed 1-settings"));
    return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
  }

  if (vm->vm_cr3_target_cnt > VMX_CR3_TARGET_MAX_CNT) {
    BX_ERROR(("VMFAIL: VMCS EXEC CTRL: too many CR3 targets %d", vm->vm_cr3_target_cnt));
    return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
  }

  if (vm->vmexec_ctrls2 & VMX_VM_EXEC_CTRL2_IO_BITMAPS) {
    vm->io_bitmap_addr[0] = VMread64(VMCS_64BIT_CONTROL_IO_BITMAP_A);
    vm->io_bitmap_addr[1] = VMread64(VMCS_64BIT_CONTROL_IO_BITMAP_B);
    // I/O bitmaps control enabled
    for (int bitmap=0; bitmap < 2; bitmap++) {
      if (! IsValidPageAlignedPhyAddr(vm->io_bitmap_addr[bitmap])) {
        BX_ERROR(("VMFAIL: VMCS EXEC CTRL: I/O bitmap %c phy addr malformed", 'A' + bitmap));
        return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
      }
    }
  }

  if (vm->vmexec_ctrls2 & VMX_VM_EXEC_CTRL2_MSR_BITMAPS) {
    // MSR bitmaps control enabled
    vm->msr_bitmap_addr = (bx_phy_address) VMread64(VMCS_64BIT_CONTROL_MSR_BITMAPS);
    if (! IsValidPageAlignedPhyAddr(vm->msr_bitmap_addr)) {
      BX_ERROR(("VMFAIL: VMCS EXEC CTRL: MSR bitmap phy addr malformed"));
      return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
    }
  }

  if (! (vm->vmexec_ctrls1 & VMX_VM_EXEC_CTRL1_NMI_EXITING)) {
    if (vm->vmexec_ctrls1 & VMX_VM_EXEC_CTRL1_VIRTUAL_NMI) {
      BX_ERROR(("VMFAIL: VMCS EXEC CTRL: misconfigured virtual NMI control"));
      return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
    }
  }

  if (! (vm->vmexec_ctrls1 & VMX_VM_EXEC_CTRL1_VIRTUAL_NMI)) {
    if (vm->vmexec_ctrls2 & VMX_VM_EXEC_CTRL2_NMI_WINDOW_EXITING) {
      BX_ERROR(("VMFAIL: VMCS EXEC CTRL: misconfigured virtual NMI control"));
      return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
    }
  }

#if BX_SUPPORT_VMX >= 2
  if (vm->vmexec_ctrls3 & VMX_VM_EXEC_CTRL3_VMCS_SHADOWING) {
    vm->vmread_bitmap_addr = (bx_phy_address) VMread64(VMCS_64BIT_CONTROL_VMREAD_BITMAP_ADDR);
    if (! IsValidPageAlignedPhyAddr(vm->vmread_bitmap_addr)) {
      BX_ERROR(("VMFAIL: VMCS EXEC CTRL: VMREAD bitmap phy addr malformed"));
      return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
    }
    vm->vmwrite_bitmap_addr = (bx_phy_address) VMread64(VMCS_64BIT_CONTROL_VMWRITE_BITMAP_ADDR);
    if (! IsValidPageAlignedPhyAddr(vm->vmwrite_bitmap_addr)) {
      BX_ERROR(("VMFAIL: VMCS EXEC CTRL: VMWRITE bitmap phy addr malformed"));
      return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
    }
  }

  if (vm->vmexec_ctrls3 & VMX_VM_EXEC_CTRL3_EPT_VIOLATION_EXCEPTION) {
    vm->ve_info_addr = (bx_phy_address) VMread64(VMCS_64BIT_CONTROL_VE_EXCEPTION_INFO_ADDR);
    if (! IsValidPageAlignedPhyAddr(vm->ve_info_addr)) {
      BX_ERROR(("VMFAIL: VMCS EXEC CTRL: broken #VE information address"));
      return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
    }
  }
#endif

#if BX_SUPPORT_X86_64
  if (vm->vmexec_ctrls2 & VMX_VM_EXEC_CTRL2_TPR_SHADOW) {
    vm->virtual_apic_page_addr = (bx_phy_address) VMread64(VMCS_64BIT_CONTROL_VIRTUAL_APIC_PAGE_ADDR);
    if (! IsValidPageAlignedPhyAddr(vm->virtual_apic_page_addr)) {
      BX_ERROR(("VMFAIL: VMCS EXEC CTRL: virtual apic phy addr malformed"));
      return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
    }

#if BX_SUPPORT_VMX >= 2
    if (vm->vmexec_ctrls3 & VMX_VM_EXEC_CTRL3_VIRTUAL_INT_DELIVERY) {
      if (! PIN_VMEXIT(VMX_VM_EXEC_CTRL1_EXTERNAL_INTERRUPT_VMEXIT)) {
        BX_ERROR(("VMFAIL: VMCS EXEC CTRL: virtual interrupt delivery must be set together with external interrupt exiting"));
        return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
      }

      for (int reg = 0; reg < 8; reg++) {
        vm->eoi_exit_bitmap[reg] = VMread32(VMCS_64BIT_CONTROL_EOI_EXIT_BITMAP0 + reg);
      }

      Bit16u guest_interrupt_status = VMread16(VMCS_16BIT_GUEST_INTERRUPT_STATUS);
      vm->rvi = guest_interrupt_status & 0xff;
      vm->svi = guest_interrupt_status >> 8;
    }
    else
#endif
    {
      vm->vm_tpr_threshold = VMread32(VMCS_32BIT_CONTROL_TPR_THRESHOLD);

      if (vm->vm_tpr_threshold & 0xfffffff0) {
        BX_ERROR(("VMFAIL: VMCS EXEC CTRL: TPR threshold too big"));
        return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
      }

      if (! (vm->vmexec_ctrls3 & VMX_VM_EXEC_CTRL3_VIRTUALIZE_APIC_ACCESSES)) {
        Bit8u tpr_shadow = (VMX_Read_Virtual_APIC(BX_LAPIC_TPR) >> 4) & 0xf;
        if (vm->vm_tpr_threshold > tpr_shadow) {
          BX_ERROR(("VMFAIL: VMCS EXEC CTRL: TPR threshold > TPR shadow"));
          return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
        }
      }
    }
  }
#if BX_SUPPORT_VMX >= 2
  else { // TPR shadow is disabled
    if (vm->vmexec_ctrls3 & (VMX_VM_EXEC_CTRL3_VIRTUALIZE_X2APIC_MODE |
                             VMX_VM_EXEC_CTRL3_VIRTUALIZE_APIC_REGISTERS |
                             VMX_VM_EXEC_CTRL3_VIRTUAL_INT_DELIVERY))
    {
      BX_ERROR(("VMFAIL: VMCS EXEC CTRL: apic virtualization is enabled without TPR shadow"));
      return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
    }
  }
#endif // BX_SUPPORT_VMX >= 2

  if (vm->vmexec_ctrls3 & VMX_VM_EXEC_CTRL3_VIRTUALIZE_APIC_ACCESSES) {
    vm->apic_access_page = (bx_phy_address) VMread64(VMCS_64BIT_CONTROL_APIC_ACCESS_ADDR);
    if (! IsValidPageAlignedPhyAddr(vm->apic_access_page)) {
      BX_ERROR(("VMFAIL: VMCS EXEC CTRL: apic access page phy addr malformed"));
      return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
    }

#if BX_SUPPORT_VMX >= 2
    if (vm->vmexec_ctrls3 & VMX_VM_EXEC_CTRL3_VIRTUALIZE_X2APIC_MODE) {
      BX_ERROR(("VMFAIL: VMCS EXEC CTRL: virtualize X2APIC mode enabled together with APIC access virtualization"));
      return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
    }
#endif
  }

#if BX_SUPPORT_VMX >= 2
  if (vm->vmexec_ctrls3 & VMX_VM_EXEC_CTRL3_EPT_ENABLE) {
    vm->eptptr = (bx_phy_address) VMread64(VMCS_64BIT_CONTROL_EPTPTR);
    if (! is_eptptr_valid(vm->eptptr)) {
      BX_ERROR(("VMFAIL: VMCS EXEC CTRL: invalid EPTPTR value"));
      return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
    }
  }
  else {
    if (vm->vmexec_ctrls3 & VMX_VM_EXEC_CTRL3_UNRESTRICTED_GUEST) {
      BX_ERROR(("VMFAIL: VMCS EXEC CTRL: unrestricted guest without EPT"));
      return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
    }
  }

  if (vm->vmexec_ctrls3 & VMX_VM_EXEC_CTRL3_VPID_ENABLE) {
    vm->vpid = VMread16(VMCS_16BIT_CONTROL_VPID);
    if (vm->vpid == 0) {
      BX_ERROR(("VMFAIL: VMCS EXEC CTRL: guest VPID == 0"));
      return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
    }
  }

  if (vm->vmexec_ctrls3 & VMX_VM_EXEC_CTRL3_PAUSE_LOOP_VMEXIT) {
    vm->ple.pause_loop_exiting_gap = VMread32(VMCS_32BIT_CONTROL_PAUSE_LOOP_EXITING_GAP);
    vm->ple.pause_loop_exiting_window = VMread32(VMCS_32BIT_CONTROL_PAUSE_LOOP_EXITING_WINDOW);
  }

  if (vm->vmexec_ctrls3 & VMX_VM_EXEC_CTRL3_VMFUNC_ENABLE)
    vm->vmfunc_ctrls = VMread64(VMCS_64BIT_CONTROL_VMFUNC_CTRLS);
  else
    vm->vmfunc_ctrls = 0;

  if (vm->vmfunc_ctrls & ~VMX_VMFUNC_CTRL1_SUPPORTED_BITS) {
    BX_ERROR(("VMFAIL: VMCS VM Functions control reserved bits set"));
    return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
  }

  if (vm->vmfunc_ctrls & VMX_VMFUNC_EPTP_SWITCHING_MASK) {
    if ((vm->vmexec_ctrls3 & VMX_VM_EXEC_CTRL3_EPT_ENABLE) == 0) {
      BX_ERROR(("VMFAIL: VMFUNC EPTP-SWITCHING: EPT disabled"));
      return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
    }

    vm->eptp_list_address = VMread64(VMCS_64BIT_CONTROL_EPTP_LIST_ADDRESS);
    if (! IsValidPageAlignedPhyAddr(vm->eptp_list_address)) {
      BX_ERROR(("VMFAIL: VMFUNC EPTP-SWITCHING: eptp list phy addr malformed"));
      return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
    }
  }

  if (vm->vmexec_ctrls3 & VMX_VM_EXEC_CTRL3_PML_ENABLE) {
    if ((vm->vmexec_ctrls3 & VMX_VM_EXEC_CTRL3_EPT_ENABLE) == 0) {
      BX_ERROR(("VMFAIL: VMCS EXEC CTRL: PML is enabled without EPT"));
      return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
    }

    vm->pml_address = (bx_phy_address) VMread64(VMCS_64BIT_CONTROL_PML_ADDRESS);
    if (! IsValidPageAlignedPhyAddr(vm->pml_address)) {
      BX_ERROR(("VMFAIL: VMCS EXEC CTRL: PML base phy addr malformed"));
      return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
    }
    vm->pml_index = VMread16(VMCS_16BIT_GUEST_PML_INDEX);
  }
#endif

#endif // BX_SUPPORT_X86_64

  if (vm->vmexec_ctrls3 & VMX_VM_EXEC_CTRL3_TSC_SCALING) {
    if ((vm->tsc_multiplier = VMread64(VMCS_64BIT_CONTROL_TSC_MULTIPLIER)) == 0) {
      BX_ERROR(("VMFAIL: VMCS EXEC CTRL: TSC multiplier should be non zero"));
      return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
    }
  }

  //
  // Load VM-exit control fields to VMCS Cache
  //

  vm->vmexit_ctrls = VMread32(VMCS_32BIT_CONTROL_VMEXIT_CONTROLS);
  vm->vmexit_msr_store_cnt = VMread32(VMCS_32BIT_CONTROL_VMEXIT_MSR_STORE_COUNT);
  vm->vmexit_msr_load_cnt = VMread32(VMCS_32BIT_CONTROL_VMEXIT_MSR_LOAD_COUNT);

  //
  // Check VM-exit control fields
  //

  if (~vm->vmexit_ctrls & VMX_CHECKS_USE_MSR_VMX_VMEXIT_CTRLS_LO) {
    BX_ERROR(("VMFAIL: VMCS EXEC CTRL: VMX vmexit controls allowed 0-settings"));
    return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
  }
  if (vm->vmexit_ctrls & ~VMX_CHECKS_USE_MSR_VMX_VMEXIT_CTRLS_HI) {
    BX_ERROR(("VMFAIL: VMCS EXEC CTRL: VMX vmexit controls allowed 1-settings"));
    return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
  }

#if BX_SUPPORT_VMX >= 2
  if ((~vm->vmexec_ctrls1 & VMX_VM_EXEC_CTRL1_VMX_PREEMPTION_TIMER_VMEXIT) && (vm->vmexit_ctrls & VMX_VMEXIT_CTRL1_STORE_VMX_PREEMPTION_TIMER)) {
    BX_ERROR(("VMFAIL: save_VMX_preemption_timer VMEXIT control is set but VMX_preemption_timer VMEXEC control is clear"));
    return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
  }
#endif

  if (vm->vmexit_msr_store_cnt > 0) {
    vm->vmexit_msr_store_addr = VMread64(VMCS_64BIT_CONTROL_VMEXIT_MSR_STORE_ADDR);
    if ((vm->vmexit_msr_store_addr & 0xf) != 0 || ! IsValidPhyAddr(vm->vmexit_msr_store_addr)) {
      BX_ERROR(("VMFAIL: VMCS VMEXIT CTRL: msr store addr malformed"));
      return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
    }

    Bit64u last_byte = vm->vmexit_msr_store_addr + (vm->vmexit_msr_store_cnt * 16) - 1;
    if (! IsValidPhyAddr(last_byte)) {
      BX_ERROR(("VMFAIL: VMCS VMEXIT CTRL: msr store addr too high"));
      return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
    }
  }

  if (vm->vmexit_msr_load_cnt > 0) {
    vm->vmexit_msr_load_addr = VMread64(VMCS_64BIT_CONTROL_VMEXIT_MSR_LOAD_ADDR);
    if ((vm->vmexit_msr_load_addr & 0xf) != 0 || ! IsValidPhyAddr(vm->vmexit_msr_load_addr)) {
      BX_ERROR(("VMFAIL: VMCS VMEXIT CTRL: msr load addr malformed"));
      return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
    }

    Bit64u last_byte = (Bit64u) vm->vmexit_msr_load_addr + (vm->vmexit_msr_load_cnt * 16) - 1;
    if (! IsValidPhyAddr(last_byte)) {
      BX_ERROR(("VMFAIL: VMCS VMEXIT CTRL: msr load addr too high"));
      return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
    }
  }
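
  // The MSR-store/MSR-load area bounds above are computed with 16 bytes per
  // entry, matching the architectural VMX MSR-entry layout (32-bit MSR index
  // plus 64-bit MSR data, padded to 128 bits), so the last byte of each area
  // is base + count*16 - 1.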

  //
  // Load VM-entry control fields to VMCS Cache
  //

  vm->vmentry_ctrls = VMread32(VMCS_32BIT_CONTROL_VMENTRY_CONTROLS);
  vm->vmentry_msr_load_cnt = VMread32(VMCS_32BIT_CONTROL_VMENTRY_MSR_LOAD_COUNT);

  //
  // Check VM-entry control fields
  //

  if (~vm->vmentry_ctrls & VMX_CHECKS_USE_MSR_VMX_VMENTRY_CTRLS_LO) {
    BX_ERROR(("VMFAIL: VMCS EXEC CTRL: VMX vmentry controls allowed 0-settings"));
    return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
  }
  if (vm->vmentry_ctrls & ~VMX_CHECKS_USE_MSR_VMX_VMENTRY_CTRLS_HI) {
    BX_ERROR(("VMFAIL: VMCS EXEC CTRL: VMX vmentry controls allowed 1-settings"));
    return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
  }

  if (vm->vmentry_ctrls & VMX_VMENTRY_CTRL1_DEACTIVATE_DUAL_MONITOR_TREATMENT) {
    if (! BX_CPU_THIS_PTR in_smm) {
      BX_ERROR(("VMFAIL: VMENTRY from outside SMM with dual-monitor treatment enabled"));
      return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
    }
  }

  if (vm->vmentry_msr_load_cnt > 0) {
    vm->vmentry_msr_load_addr = VMread64(VMCS_64BIT_CONTROL_VMENTRY_MSR_LOAD_ADDR);
    if ((vm->vmentry_msr_load_addr & 0xf) != 0 || ! IsValidPhyAddr(vm->vmentry_msr_load_addr)) {
      BX_ERROR(("VMFAIL: VMCS VMENTRY CTRL: msr load addr malformed"));
      return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
    }

    Bit64u last_byte = vm->vmentry_msr_load_addr + (vm->vmentry_msr_load_cnt * 16) - 1;
    if (! IsValidPhyAddr(last_byte)) {
      BX_ERROR(("VMFAIL: VMCS VMENTRY CTRL: msr load addr too high"));
      return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
    }
  }

  //
  // Check VM-entry event injection info
  //

  vm->vmentry_interr_info = VMread32(VMCS_32BIT_CONTROL_VMENTRY_INTERRUPTION_INFO);
  vm->vmentry_excep_err_code = VMread32(VMCS_32BIT_CONTROL_VMENTRY_EXCEPTION_ERR_CODE);
  vm->vmentry_instr_length = VMread32(VMCS_32BIT_CONTROL_VMENTRY_INSTRUCTION_LENGTH);

  if (VMENTRY_INJECTING_EVENT(vm->vmentry_interr_info)) {

    /* the VMENTRY injecting event to the guest */
    unsigned vector = vm->vmentry_interr_info & 0xff;
    unsigned event_type = (vm->vmentry_interr_info >> 8) & 7;
    unsigned push_error = (vm->vmentry_interr_info >> 11) & 1;
    unsigned error_code = push_error ? vm->vmentry_excep_err_code : 0;

    unsigned push_error_reference = 0;
    if (event_type == BX_HARDWARE_EXCEPTION && vector < BX_CPU_HANDLED_EXCEPTIONS)
      push_error_reference = exceptions_info[vector].push_error;

    if (vm->vmentry_interr_info & 0x7ffff000) {
      BX_ERROR(("VMFAIL: VMENTRY broken interruption info field"));
      return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
    }

    switch (event_type) {
      case BX_EXTERNAL_INTERRUPT:
        break;

      case BX_NMI:
        if (vector != 2) {
          BX_ERROR(("VMFAIL: VMENTRY bad injected event vector %d", vector));
          return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
        }
/*
        // injecting NMI
        if (vm->vmexec_ctrls1 & VMX_VM_EXEC_CTRL1_VIRTUAL_NMI) {
          if (guest.interruptibility_state & BX_VMX_INTERRUPTS_BLOCKED_NMI_BLOCKED) {
            BX_ERROR(("VMFAIL: VMENTRY injected NMI vector when blocked by NMI in interruptibility state", vector));
            return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
          }
        }
*/
        break;

      case BX_HARDWARE_EXCEPTION:
        if (vector > 31) {
          BX_ERROR(("VMFAIL: VMENTRY bad injected event vector %d", vector));
          return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
        }
        break;

      case BX_SOFTWARE_INTERRUPT:
      case BX_PRIVILEGED_SOFTWARE_INTERRUPT:
      case BX_SOFTWARE_EXCEPTION:
        if (vm->vmentry_instr_length == 0 || vm->vmentry_instr_length > 15) {
          BX_ERROR(("VMFAIL: VMENTRY bad injected event instr length"));
          return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
        }
        break;

      case 7: /* MTF */
        if (BX_SUPPORT_VMX_EXTENSION(BX_VMX_MONITOR_TRAP_FLAG)) {
          if (vector != 0) {
            BX_ERROR(("VMFAIL: VMENTRY bad MTF injection with vector=%d", vector));
            return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
          }
        }
        break;

      default:
        BX_ERROR(("VMFAIL: VMENTRY bad injected event type %d", event_type));
        return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
    }

#if BX_SUPPORT_VMX >= 2
    if (vm->vmexec_ctrls3 & VMX_VM_EXEC_CTRL3_UNRESTRICTED_GUEST) {
      unsigned protected_mode_guest = (Bit32u) VMread_natural(VMCS_GUEST_CR0) & BX_CR0_PE_MASK;
      if (! protected_mode_guest) push_error_reference = 0;
    }
#endif

    if (push_error != push_error_reference) {
      BX_ERROR(("VMFAIL: VMENTRY injected event vector %d broken error code", vector));
      return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
    }

    if (push_error) {
      if (error_code & 0xffff0000) {
        BX_ERROR(("VMFAIL: VMENTRY bad error code 0x%08x for injected event %d", error_code, vector));
        return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
      }
    }
  }

  return VMXERR_NO_ERROR;
}

VMX_error_code BX_CPU_C::VMenterLoadCheckHostState(void)
{
  VMCS_CACHE *vm = &BX_CPU_THIS_PTR vmcs;
  VMCS_HOST_STATE *host_state = &vm->host_state;
  bx_bool x86_64_host = 0, x86_64_guest = 0;

  //
  // VM Host State Checks Related to Address-Space Size
  //

  Bit32u vmexit_ctrls = vm->vmexit_ctrls;
  if (vmexit_ctrls & VMX_VMEXIT_CTRL1_HOST_ADDR_SPACE_SIZE) {
    x86_64_host = 1;
  }
  Bit32u vmentry_ctrls = vm->vmentry_ctrls;
  if (vmentry_ctrls & VMX_VMENTRY_CTRL1_X86_64_GUEST) {
    x86_64_guest = 1;
  }

#if BX_SUPPORT_X86_64
  if (long_mode()) {
    if (! x86_64_host) {
      BX_ERROR(("VMFAIL: VMCS x86-64 host control invalid on VMENTRY"));
      return VMXERR_VMENTRY_INVALID_VM_HOST_STATE_FIELD;
    }
  }
  else
#endif
  {
    if (x86_64_host || x86_64_guest) {
      BX_ERROR(("VMFAIL: VMCS x86-64 guest(%d)/host(%d) controls invalid on VMENTRY", x86_64_guest, x86_64_host));
      return VMXERR_VMENTRY_INVALID_VM_HOST_STATE_FIELD;
    }
  }

  //
  // Load and Check VM Host State to VMCS Cache
  //

  host_state->cr0 = (bx_address) VMread_natural(VMCS_HOST_CR0);
  if (~host_state->cr0 & VMX_MSR_CR0_FIXED0) {
    BX_ERROR(("VMFAIL: VMCS host state invalid CR0 0x%08x", (Bit32u) host_state->cr0));
    return VMXERR_VMENTRY_INVALID_VM_HOST_STATE_FIELD;
  }

  if (host_state->cr0 & ~VMX_MSR_CR0_FIXED1) {
    BX_ERROR(("VMFAIL: VMCS host state invalid CR0 0x%08x", (Bit32u) host_state->cr0));
    return VMXERR_VMENTRY_INVALID_VM_HOST_STATE_FIELD;
  }

  host_state->cr3 = (bx_address) VMread_natural(VMCS_HOST_CR3);
#if BX_SUPPORT_X86_64
  if (! IsValidPhyAddr(host_state->cr3)) {
    BX_ERROR(("VMFAIL: VMCS host state invalid CR3"));
    return VMXERR_VMENTRY_INVALID_VM_HOST_STATE_FIELD;
  }
#endif

  host_state->cr4 = (bx_address) VMread_natural(VMCS_HOST_CR4);
  if (~host_state->cr4 & VMX_MSR_CR4_FIXED0) {
    BX_ERROR(("VMFAIL: VMCS host state invalid CR4 0x" FMT_ADDRX, host_state->cr4));
    return VMXERR_VMENTRY_INVALID_VM_HOST_STATE_FIELD;
  }
  if (host_state->cr4 & ~VMX_MSR_CR4_FIXED1) {
    BX_ERROR(("VMFAIL: VMCS host state invalid CR4 0x" FMT_ADDRX, host_state->cr4));
    return VMXERR_VMENTRY_INVALID_VM_HOST_STATE_FIELD;
  }

  for (int n=0; n<6; n++) {
    host_state->segreg_selector[n] = VMread16(VMCS_16BIT_HOST_ES_SELECTOR + 2*n);
    if (host_state->segreg_selector[n] & 7) {
      BX_ERROR(("VMFAIL: VMCS host segreg %d TI/RPL != 0", n));
      return VMXERR_VMENTRY_INVALID_VM_HOST_STATE_FIELD;
    }
  }

  if (host_state->segreg_selector[BX_SEG_REG_CS] == 0) {
    BX_ERROR(("VMFAIL: VMCS host CS selector 0"));
    return VMXERR_VMENTRY_INVALID_VM_HOST_STATE_FIELD;
  }

  if (! x86_64_host && host_state->segreg_selector[BX_SEG_REG_SS] == 0) {
    BX_ERROR(("VMFAIL: VMCS host SS selector 0"));
    return VMXERR_VMENTRY_INVALID_VM_HOST_STATE_FIELD;
  }
|
|
|
|
|
|
2009-07-21 15:56:26 +04:00
|
|
|
|
host_state->tr_selector = VMread16(VMCS_16BIT_HOST_TR_SELECTOR);
|
2009-01-31 13:43:24 +03:00
|
|
|
|
if (! host_state->tr_selector || (host_state->tr_selector & 7) != 0) {
|
|
|
|
|
BX_ERROR(("VMFAIL: VMCS invalid host TR selector"));
|
|
|
|
|
return VMXERR_VMENTRY_INVALID_VM_HOST_STATE_FIELD;
|
|
|
|
|
}
|
|
|
|
|
|
2011-07-22 00:58:54 +04:00
|
|
|
|
host_state->tr_base = (bx_address) VMread_natural(VMCS_HOST_TR_BASE);
|
2009-01-31 13:43:24 +03:00
|
|
|
|
#if BX_SUPPORT_X86_64
|
|
|
|
|
if (! IsCanonical(host_state->tr_base)) {
|
|
|
|
|
BX_ERROR(("VMFAIL: VMCS host TR BASE non canonical"));
|
|
|
|
|
return VMXERR_VMENTRY_INVALID_VM_HOST_STATE_FIELD;
|
|
|
|
|
}
|
|
|
|
|
#endif
|
|
|
|
|
|
2011-07-22 00:58:54 +04:00
|
|
|
|
  host_state->fs_base = (bx_address) VMread_natural(VMCS_HOST_FS_BASE);
  host_state->gs_base = (bx_address) VMread_natural(VMCS_HOST_GS_BASE);

#if BX_SUPPORT_X86_64
  if (! IsCanonical(host_state->fs_base)) {
    BX_ERROR(("VMFAIL: VMCS host FS BASE non canonical"));
    return VMXERR_VMENTRY_INVALID_VM_HOST_STATE_FIELD;
  }
  if (! IsCanonical(host_state->gs_base)) {
    BX_ERROR(("VMFAIL: VMCS host GS BASE non canonical"));
    return VMXERR_VMENTRY_INVALID_VM_HOST_STATE_FIELD;
  }
#endif

  host_state->gdtr_base = (bx_address) VMread_natural(VMCS_HOST_GDTR_BASE);
  host_state->idtr_base = (bx_address) VMread_natural(VMCS_HOST_IDTR_BASE);

#if BX_SUPPORT_X86_64
  if (! IsCanonical(host_state->gdtr_base)) {
    BX_ERROR(("VMFAIL: VMCS host GDTR BASE non canonical"));
    return VMXERR_VMENTRY_INVALID_VM_HOST_STATE_FIELD;
  }
  if (! IsCanonical(host_state->idtr_base)) {
    BX_ERROR(("VMFAIL: VMCS host IDTR BASE non canonical"));
    return VMXERR_VMENTRY_INVALID_VM_HOST_STATE_FIELD;
  }
#endif

  host_state->sysenter_esp_msr = (bx_address) VMread_natural(VMCS_HOST_IA32_SYSENTER_ESP_MSR);
  host_state->sysenter_eip_msr = (bx_address) VMread_natural(VMCS_HOST_IA32_SYSENTER_EIP_MSR);
  host_state->sysenter_cs_msr = (Bit16u) VMread32(VMCS_32BIT_HOST_IA32_SYSENTER_CS_MSR);

#if BX_SUPPORT_X86_64
  if (! IsCanonical(host_state->sysenter_esp_msr)) {
    BX_ERROR(("VMFAIL: VMCS host SYSENTER_ESP_MSR non canonical"));
    return VMXERR_VMENTRY_INVALID_VM_HOST_STATE_FIELD;
  }

  if (! IsCanonical(host_state->sysenter_eip_msr)) {
    BX_ERROR(("VMFAIL: VMCS host SYSENTER_EIP_MSR non canonical"));
    return VMXERR_VMENTRY_INVALID_VM_HOST_STATE_FIELD;
  }
#endif

#if BX_SUPPORT_VMX >= 2
  if (vmexit_ctrls & VMX_VMEXIT_CTRL1_LOAD_PAT_MSR) {
    host_state->pat_msr = VMread64(VMCS_64BIT_HOST_IA32_PAT);
    if (! isValidMSR_PAT(host_state->pat_msr)) {
      BX_ERROR(("VMFAIL: invalid Memory Type in host MSR_PAT"));
      return VMXERR_VMENTRY_INVALID_VM_HOST_STATE_FIELD;
    }
  }
#endif

  host_state->rsp = (bx_address) VMread_natural(VMCS_HOST_RSP);
  host_state->rip = (bx_address) VMread_natural(VMCS_HOST_RIP);

#if BX_SUPPORT_X86_64

#if BX_SUPPORT_VMX >= 2
  if (vmexit_ctrls & VMX_VMEXIT_CTRL1_LOAD_EFER_MSR) {
    host_state->efer_msr = VMread64(VMCS_64BIT_HOST_IA32_EFER);
    if (host_state->efer_msr & ~((Bit64u) BX_CPU_THIS_PTR efer_suppmask)) {
      BX_ERROR(("VMFAIL: VMCS host EFER reserved bits set !"));
      return VMXERR_VMENTRY_INVALID_VM_HOST_STATE_FIELD;
    }
    bx_bool lme = (host_state->efer_msr >>  8) & 0x1;
    bx_bool lma = (host_state->efer_msr >> 10) & 0x1;
    if (lma != lme || lma != x86_64_host) {
      BX_ERROR(("VMFAIL: VMCS host EFER (0x%08x) inconsistent value !", (Bit32u) host_state->efer_msr));
      return VMXERR_VMENTRY_INVALID_VM_HOST_STATE_FIELD;
    }
  }
#endif
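  // Note on the EFER consistency check above: bit 8 of IA32_EFER is LME and
  // bit 10 is LMA, so the host value is rejected unless LMA == LME and both
  // agree with the "host address-space size" VM-exit control (x86_64_host).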

  if (x86_64_host) {
    if ((host_state->cr4 & BX_CR4_PAE_MASK) == 0) {
      BX_ERROR(("VMFAIL: VMCS host CR4.PAE=0 with x86-64 host"));
      return VMXERR_VMENTRY_INVALID_VM_HOST_STATE_FIELD;
    }
    if (! IsCanonical(host_state->rip)) {
      BX_ERROR(("VMFAIL: VMCS host RIP non-canonical"));
      return VMXERR_VMENTRY_INVALID_VM_HOST_STATE_FIELD;
    }
  }
  else {
    if (GET32H(host_state->rip) != 0) {
      BX_ERROR(("VMFAIL: VMCS host RIP > 32 bit"));
      return VMXERR_VMENTRY_INVALID_VM_HOST_STATE_FIELD;
    }
    if (host_state->cr4 & BX_CR4_PCIDE_MASK) {
      BX_ERROR(("VMFAIL: VMCS host CR4.PCIDE set"));
      return VMXERR_VMENTRY_INVALID_VM_HOST_STATE_FIELD;
    }
  }
#endif

  return VMXERR_NO_ERROR;
}
BX_CPP_INLINE bx_bool IsLimitAccessRightsConsistent(Bit32u limit, Bit32u ar)
{
  bx_bool g = (ar >> 15) & 1;

  // access rights reserved bits set
  if (ar & 0xfffe0f00) return 0;

  if (g) {
    // when G is set, all bits of limit[11:0] must also be set
    if ((limit & 0xfff) != 0xfff)
      return 0;
  }
  else {
    // when G is clear, all bits of limit[31:20] must also be clear
    if ((limit & 0xfff00000) != 0)
      return 0;
  }

  return 1;
}
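// Example of the rule enforced above: a limit of 0x000FFFFF together with G=1
// is consistent (limit[11:0] are all ones), while 0x00100000 with G=0 is not,
// since bits 31:20 can be non-zero only for a page-granular limit.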

Bit32u BX_CPU_C::VMenterLoadCheckGuestState(Bit64u *qualification)
{
  static const char *segname[] = { "ES", "CS", "SS", "DS", "FS", "GS" };
  int n;

  VMCS_GUEST_STATE guest;
  VMCS_CACHE *vm = &BX_CPU_THIS_PTR vmcs;

  *qualification = VMENTER_ERR_NO_ERROR;

  //
  // Load and Check Guest State from VMCS
  //

  guest.rflags = VMread_natural(VMCS_GUEST_RFLAGS);
  // RFLAGS reserved bits [63:22], bit 15, bit 5, bit 3 must be zero
  if (guest.rflags & BX_CONST64(0xFFFFFFFFFFC08028)) {
    BX_ERROR(("VMENTER FAIL: RFLAGS reserved bits are set"));
    return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
  }
  // RFLAGS[1] must be always set
  if ((guest.rflags & 0x2) == 0) {
    BX_ERROR(("VMENTER FAIL: RFLAGS[1] cleared"));
    return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
  }

  bx_bool v8086_guest = 0;
  if (guest.rflags & EFlagsVMMask)
    v8086_guest = 1;

  bx_bool x86_64_guest = 0; // can't be 1 if X86_64 is not supported (checked before)
  Bit32u vmentry_ctrls = vm->vmentry_ctrls;
#if BX_SUPPORT_X86_64
  if (vmentry_ctrls & VMX_VMENTRY_CTRL1_X86_64_GUEST) {
    BX_DEBUG(("VMENTER to x86-64 guest"));
    x86_64_guest = 1;
  }
#endif

  if (x86_64_guest && v8086_guest) {
    BX_ERROR(("VMENTER FAIL: Enter to x86-64 guest with RFLAGS.VM"));
    return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
  }

  guest.cr0 = VMread_natural(VMCS_GUEST_CR0);

#if BX_SUPPORT_VMX >= 2
  if (vm->vmexec_ctrls3 & VMX_VM_EXEC_CTRL3_UNRESTRICTED_GUEST) {
    if (~guest.cr0 & (VMX_MSR_CR0_FIXED0 & ~(BX_CR0_PE_MASK | BX_CR0_PG_MASK))) {
      BX_ERROR(("VMENTER FAIL: VMCS guest invalid CR0"));
      return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
    }

    bx_bool pe = (guest.cr0 & BX_CR0_PE_MASK) != 0;
    bx_bool pg = (guest.cr0 & BX_CR0_PG_MASK) != 0;
    if (pg && !pe) {
      BX_ERROR(("VMENTER FAIL: VMCS unrestricted guest CR0.PG without CR0.PE"));
      return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
    }
  }
  else
#endif
  {
    if (~guest.cr0 & VMX_MSR_CR0_FIXED0) {
      BX_ERROR(("VMENTER FAIL: VMCS guest invalid CR0"));
      return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
    }
  }

  if (guest.cr0 & ~VMX_MSR_CR0_FIXED1) {
    BX_ERROR(("VMENTER FAIL: VMCS guest invalid CR0"));
    return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
  }
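  // The CR0 checks above follow the VMX fixed-bit convention: a guest CR0 bit
  // may be 0 only if it is 0 in VMX_MSR_CR0_FIXED0 and may be 1 only if it is
  // 1 in VMX_MSR_CR0_FIXED1; an unrestricted guest is additionally allowed to
  // clear PE and PG.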

#if BX_SUPPORT_VMX >= 2
  bx_bool real_mode_guest = 0;
  if (! (guest.cr0 & BX_CR0_PE_MASK))
    real_mode_guest = 1;
#endif

  guest.cr3 = VMread_natural(VMCS_GUEST_CR3);
#if BX_SUPPORT_X86_64
  if (! IsValidPhyAddr(guest.cr3)) {
    BX_ERROR(("VMENTER FAIL: VMCS guest invalid CR3"));
    return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
  }
#endif

  guest.cr4 = VMread_natural(VMCS_GUEST_CR4);
  if (~guest.cr4 & VMX_MSR_CR4_FIXED0) {
    BX_ERROR(("VMENTER FAIL: VMCS guest invalid CR4"));
    return VMXERR_VMENTRY_INVALID_VM_HOST_STATE_FIELD;
  }

  if (guest.cr4 & ~VMX_MSR_CR4_FIXED1) {
    BX_ERROR(("VMENTER FAIL: VMCS guest invalid CR4"));
    return VMXERR_VMENTRY_INVALID_VM_HOST_STATE_FIELD;
  }

#if BX_SUPPORT_X86_64
  if (x86_64_guest) {
    if ((guest.cr4 & BX_CR4_PAE_MASK) == 0) {
      BX_ERROR(("VMENTER FAIL: VMCS guest CR4.PAE=0 in x86-64 mode"));
      return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
    }
  }
  else {
    if (guest.cr4 & BX_CR4_PCIDE_MASK) {
      BX_ERROR(("VMENTER FAIL: VMCS CR4.PCIDE set in 32-bit guest"));
      return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
    }
  }
#endif

#if BX_SUPPORT_X86_64
  if (vmentry_ctrls & VMX_VMENTRY_CTRL1_LOAD_DBG_CTRLS) {
    guest.dr7 = VMread_natural(VMCS_GUEST_DR7);
    if (GET32H(guest.dr7)) {
      BX_ERROR(("VMENTER FAIL: VMCS guest invalid DR7"));
      return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
    }
  }
#endif

  //
  // Load and Check Guest State from VMCS - Segment Registers
  //

  for (n=0; n<6; n++) {
    Bit16u selector = VMread16(VMCS_16BIT_GUEST_ES_SELECTOR + 2*n);
    bx_address base = (bx_address) VMread_natural(VMCS_GUEST_ES_BASE + 2*n);
    Bit32u limit = VMread32(VMCS_32BIT_GUEST_ES_LIMIT + 2*n);
    Bit32u ar = VMread32(VMCS_32BIT_GUEST_ES_ACCESS_RIGHTS + 2*n);
    ar = vmx_from_ar_byte_rd(ar);
    bx_bool invalid = (ar >> 16) & 1;

    set_segment_ar_data(&guest.sregs[n], !invalid,
                  (Bit16u) selector, base, limit, (Bit16u) ar);

    if (v8086_guest) {
      // guest in V8086 mode
      if (base != ((bx_address)(selector << 4))) {
        BX_ERROR(("VMENTER FAIL: VMCS v8086 guest bad %s.BASE", segname[n]));
        return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
      }
      if (limit != 0xffff) {
        BX_ERROR(("VMENTER FAIL: VMCS v8086 guest %s.LIMIT != 0xFFFF", segname[n]));
        return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
      }
      // present, expand-up read/write accessed, segment, DPL=3
      if (ar != 0xF3) {
        BX_ERROR(("VMENTER FAIL: VMCS v8086 guest %s.AR != 0xF3", segname[n]));
        return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
      }

      continue; // go to next segment register
    }

#if BX_SUPPORT_X86_64
    if (n >= BX_SEG_REG_FS) {
      if (! IsCanonical(base)) {
        BX_ERROR(("VMENTER FAIL: VMCS guest %s.BASE non canonical", segname[n]));
        return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
      }
    }
#endif

    if (n != BX_SEG_REG_CS && invalid)
      continue;

#if BX_SUPPORT_X86_64
    if (n == BX_SEG_REG_SS && (selector & BX_SELECTOR_RPL_MASK) == 0) {
      // SS is allowed to be NULL selector if going to 64-bit guest
      if (x86_64_guest && guest.sregs[BX_SEG_REG_CS].cache.u.segment.l)
        continue;
    }

    if (n < BX_SEG_REG_FS) {
      if (GET32H(base) != 0) {
        BX_ERROR(("VMENTER FAIL: VMCS guest %s.BASE > 32 bit", segname[n]));
        return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
      }
    }
#endif

    if (! guest.sregs[n].cache.segment) {
      BX_ERROR(("VMENTER FAIL: VMCS guest %s not segment", segname[n]));
      return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
    }

    if (! guest.sregs[n].cache.p) {
      BX_ERROR(("VMENTER FAIL: VMCS guest %s not present", segname[n]));
      return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
    }

    if (! IsLimitAccessRightsConsistent(limit, ar)) {
      BX_ERROR(("VMENTER FAIL: VMCS guest %s.AR/LIMIT malformed", segname[n]));
      return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
    }

    if (n == BX_SEG_REG_CS) {
      // CS checks
      switch (guest.sregs[BX_SEG_REG_CS].cache.type) {
        case BX_CODE_EXEC_ONLY_ACCESSED:
        case BX_CODE_EXEC_READ_ACCESSED:
          // non-conforming segment
          if (guest.sregs[BX_SEG_REG_CS].selector.rpl != guest.sregs[BX_SEG_REG_CS].cache.dpl) {
            BX_ERROR(("VMENTER FAIL: VMCS guest non-conforming CS.RPL <> CS.DPL"));
            return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
          }
          break;
        case BX_CODE_EXEC_ONLY_CONFORMING_ACCESSED:
        case BX_CODE_EXEC_READ_CONFORMING_ACCESSED:
          // conforming segment
          if (guest.sregs[BX_SEG_REG_CS].selector.rpl < guest.sregs[BX_SEG_REG_CS].cache.dpl) {
            BX_ERROR(("VMENTER FAIL: VMCS guest conforming CS.RPL < CS.DPL"));
            return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
          }
          break;
#if BX_SUPPORT_VMX >= 2
        case BX_DATA_READ_WRITE_ACCESSED:
          if (vm->vmexec_ctrls3 & VMX_VM_EXEC_CTRL3_UNRESTRICTED_GUEST) {
            if (guest.sregs[BX_SEG_REG_CS].cache.dpl != 0) {
              BX_ERROR(("VMENTER FAIL: VMCS unrestricted guest CS.DPL != 0"));
              return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
            }
            break;
          }
          // fall through
#endif
        default:
          BX_ERROR(("VMENTER FAIL: VMCS guest CS.TYPE"));
          return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
      }

#if BX_SUPPORT_X86_64
      if (x86_64_guest) {
        if (guest.sregs[BX_SEG_REG_CS].cache.u.segment.d_b && guest.sregs[BX_SEG_REG_CS].cache.u.segment.l) {
          BX_ERROR(("VMENTER FAIL: VMCS x86_64 guest wrong CS.D_B/L"));
          return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
        }
      }
#endif
    }
    else if (n == BX_SEG_REG_SS) {
      // SS checks
      switch (guest.sregs[BX_SEG_REG_SS].cache.type) {
        case BX_DATA_READ_WRITE_ACCESSED:
        case BX_DATA_READ_WRITE_EXPAND_DOWN_ACCESSED:
          break;
        default:
          BX_ERROR(("VMENTER FAIL: VMCS guest SS.TYPE"));
          return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
      }
    }
    else {
      // DS, ES, FS, GS
      if ((guest.sregs[n].cache.type & 0x1) == 0) {
        BX_ERROR(("VMENTER FAIL: VMCS guest %s not ACCESSED", segname[n]));
        return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
      }

      if (guest.sregs[n].cache.type & 0x8) {
        if ((guest.sregs[n].cache.type & 0x2) == 0) {
          BX_ERROR(("VMENTER FAIL: VMCS guest CODE segment %s not READABLE", segname[n]));
          return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
        }
      }

      if (! (vm->vmexec_ctrls3 & VMX_VM_EXEC_CTRL3_UNRESTRICTED_GUEST)) {
        if (guest.sregs[n].cache.type < 11) {
          // data segment or non-conforming code segment
          if (guest.sregs[n].selector.rpl > guest.sregs[n].cache.dpl) {
            BX_ERROR(("VMENTER FAIL: VMCS guest non-conforming %s.RPL > %s.DPL", segname[n], segname[n]));
            return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
          }
        }
      }
    }
  }

  if (! v8086_guest) {
    if (! (vm->vmexec_ctrls3 & VMX_VM_EXEC_CTRL3_UNRESTRICTED_GUEST)) {
      if (guest.sregs[BX_SEG_REG_SS].selector.rpl != guest.sregs[BX_SEG_REG_CS].selector.rpl) {
        BX_ERROR(("VMENTER FAIL: VMCS guest CS.RPL != SS.RPL"));
        return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
      }
      if (guest.sregs[BX_SEG_REG_SS].selector.rpl != guest.sregs[BX_SEG_REG_SS].cache.dpl) {
        BX_ERROR(("VMENTER FAIL: VMCS guest SS.RPL <> SS.DPL"));
        return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
      }
    }
#if BX_SUPPORT_VMX >= 2
    else { // unrestricted guest
      if (real_mode_guest || guest.sregs[BX_SEG_REG_CS].cache.type == BX_DATA_READ_WRITE_ACCESSED) {
        if (guest.sregs[BX_SEG_REG_SS].cache.dpl != 0) {
          BX_ERROR(("VMENTER FAIL: VMCS unrestricted guest SS.DPL != 0"));
          return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
        }
      }
    }
#endif
  }

  //
  // Load and Check Guest State from VMCS - GDTR/IDTR
  //

  Bit64u gdtr_base = VMread_natural(VMCS_GUEST_GDTR_BASE);
  Bit32u gdtr_limit = VMread32(VMCS_32BIT_GUEST_GDTR_LIMIT);
  Bit64u idtr_base = VMread_natural(VMCS_GUEST_IDTR_BASE);
  Bit32u idtr_limit = VMread32(VMCS_32BIT_GUEST_IDTR_LIMIT);

#if BX_SUPPORT_X86_64
  if (! IsCanonical(gdtr_base) || ! IsCanonical(idtr_base)) {
    BX_ERROR(("VMENTER FAIL: VMCS guest GDTR/IDTR.BASE non canonical"));
    return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
  }
#endif
  if (gdtr_limit > 0xffff || idtr_limit > 0xffff) {
    BX_ERROR(("VMENTER FAIL: VMCS guest GDTR/IDTR limit > 0xFFFF"));
    return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
  }

  //
  // Load and Check Guest State from VMCS - LDTR
  //

  Bit16u ldtr_selector = VMread16(VMCS_16BIT_GUEST_LDTR_SELECTOR);
  Bit64u ldtr_base = VMread_natural(VMCS_GUEST_LDTR_BASE);
  Bit32u ldtr_limit = VMread32(VMCS_32BIT_GUEST_LDTR_LIMIT);
  Bit32u ldtr_ar = VMread32(VMCS_32BIT_GUEST_LDTR_ACCESS_RIGHTS);
  ldtr_ar = vmx_from_ar_byte_rd(ldtr_ar);
  bx_bool ldtr_invalid = (ldtr_ar >> 16) & 1;
  if (set_segment_ar_data(&guest.ldtr, !ldtr_invalid,
         (Bit16u) ldtr_selector, ldtr_base, ldtr_limit, (Bit16u)(ldtr_ar)))
  {
    // ldtr is valid
    if (guest.ldtr.selector.ti) {
      BX_ERROR(("VMENTER FAIL: VMCS guest LDTR.TI set"));
      return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
    }
    if (guest.ldtr.cache.type != BX_SYS_SEGMENT_LDT) {
      BX_ERROR(("VMENTER FAIL: VMCS guest incorrect LDTR type (%d)", guest.ldtr.cache.type));
      return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
    }
    if (guest.ldtr.cache.segment) {
      BX_ERROR(("VMENTER FAIL: VMCS guest LDTR is not system segment"));
      return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
    }
    if (! guest.ldtr.cache.p) {
      BX_ERROR(("VMENTER FAIL: VMCS guest LDTR not present"));
      return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
    }
    if (! IsLimitAccessRightsConsistent(ldtr_limit, ldtr_ar)) {
      BX_ERROR(("VMENTER FAIL: VMCS guest LDTR.AR/LIMIT malformed"));
      return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
    }
#if BX_SUPPORT_X86_64
    if (! IsCanonical(ldtr_base)) {
      BX_ERROR(("VMENTER FAIL: VMCS guest LDTR.BASE non canonical"));
      return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
    }
#endif
  }

  //
  // Load and Check Guest State from VMCS - TR
  //

  Bit16u tr_selector = VMread16(VMCS_16BIT_GUEST_TR_SELECTOR);
  Bit64u tr_base = VMread_natural(VMCS_GUEST_TR_BASE);
  Bit32u tr_limit = VMread32(VMCS_32BIT_GUEST_TR_LIMIT);
  Bit32u tr_ar = VMread32(VMCS_32BIT_GUEST_TR_ACCESS_RIGHTS);
  tr_ar = vmx_from_ar_byte_rd(tr_ar);
  bx_bool tr_invalid = (tr_ar >> 16) & 1;

#if BX_SUPPORT_X86_64
  if (! IsCanonical(tr_base)) {
    BX_ERROR(("VMENTER FAIL: VMCS guest TR.BASE non canonical"));
    return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
  }
#endif

  set_segment_ar_data(&guest.tr, !tr_invalid,
      (Bit16u) tr_selector, tr_base, tr_limit, (Bit16u)(tr_ar));

  if (tr_invalid) {
    BX_ERROR(("VMENTER FAIL: VMCS guest TR invalid"));
    return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
  }
  if (guest.tr.selector.ti) {
    BX_ERROR(("VMENTER FAIL: VMCS guest TR.TI set"));
    return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
  }
  if (guest.tr.cache.segment) {
    BX_ERROR(("VMENTER FAIL: VMCS guest TR is not system segment"));
    return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
  }
  if (! guest.tr.cache.p) {
    BX_ERROR(("VMENTER FAIL: VMCS guest TR not present"));
    return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
  }
  if (! IsLimitAccessRightsConsistent(tr_limit, tr_ar)) {
    BX_ERROR(("VMENTER FAIL: VMCS guest TR.AR/LIMIT malformed"));
    return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
  }

  switch(guest.tr.cache.type) {
    case BX_SYS_SEGMENT_BUSY_386_TSS:
      break;
    case BX_SYS_SEGMENT_BUSY_286_TSS:
      if (! x86_64_guest) break;
      // fall through
    default:
      BX_ERROR(("VMENTER FAIL: VMCS guest incorrect TR type"));
      return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
  }

  //
  // Load and Check Guest State from VMCS - MSRS
  //

  guest.ia32_debugctl_msr = VMread64(VMCS_64BIT_GUEST_IA32_DEBUGCTL);
  guest.smbase = VMread32(VMCS_32BIT_GUEST_SMBASE);

  guest.sysenter_esp_msr = VMread_natural(VMCS_GUEST_IA32_SYSENTER_ESP_MSR);
  guest.sysenter_eip_msr = VMread_natural(VMCS_GUEST_IA32_SYSENTER_EIP_MSR);
  guest.sysenter_cs_msr = VMread32(VMCS_32BIT_GUEST_IA32_SYSENTER_CS_MSR);

#if BX_SUPPORT_X86_64
  if (! IsCanonical(guest.sysenter_esp_msr)) {
    BX_ERROR(("VMENTER FAIL: VMCS guest SYSENTER_ESP_MSR non canonical"));
    return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
  }
  if (! IsCanonical(guest.sysenter_eip_msr)) {
    BX_ERROR(("VMENTER FAIL: VMCS guest SYSENTER_EIP_MSR non canonical"));
    return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
  }
#endif

#if BX_SUPPORT_VMX >= 2
  if (vmentry_ctrls & VMX_VMENTRY_CTRL1_LOAD_PAT_MSR) {
    guest.pat_msr = VMread64(VMCS_64BIT_GUEST_IA32_PAT);
    if (! isValidMSR_PAT(guest.pat_msr)) {
      BX_ERROR(("VMENTER FAIL: invalid Memory Type in guest MSR_PAT"));
      return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
    }
  }
#endif

  guest.rip = VMread_natural(VMCS_GUEST_RIP);
  guest.rsp = VMread_natural(VMCS_GUEST_RSP);

#if BX_SUPPORT_VMX >= 2 && BX_SUPPORT_X86_64
  if (vmentry_ctrls & VMX_VMENTRY_CTRL1_LOAD_EFER_MSR) {
    guest.efer_msr = VMread64(VMCS_64BIT_GUEST_IA32_EFER);
    if (guest.efer_msr & ~((Bit64u) BX_CPU_THIS_PTR efer_suppmask)) {
      BX_ERROR(("VMENTER FAIL: VMCS guest EFER reserved bits set !"));
      return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
    }
    bx_bool lme = (guest.efer_msr >>  8) & 0x1;
    bx_bool lma = (guest.efer_msr >> 10) & 0x1;
    if (lma != x86_64_guest) {
      BX_ERROR(("VMENTER FAIL: VMCS guest EFER.LMA doesn't match x86_64_guest !"));
      return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
    }
    if (lma != lme && (guest.cr0 & BX_CR0_PG_MASK) != 0) {
      BX_ERROR(("VMENTER FAIL: VMCS guest EFER (0x%08x) inconsistent value !", (Bit32u) guest.efer_msr));
      return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
    }
  }

  if (! x86_64_guest || !guest.sregs[BX_SEG_REG_CS].cache.u.segment.l) {
    if (GET32H(guest.rip) != 0) {
      BX_ERROR(("VMENTER FAIL: VMCS guest RIP > 32 bit"));
      return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
    }
  }
#endif

  //
  // Load and Check Guest Non-Registers State from VMCS
  //

  vm->vmcs_linkptr = VMread64(VMCS_64BIT_GUEST_LINK_POINTER);
  if (vm->vmcs_linkptr != BX_INVALID_VMCSPTR) {
    if (! IsValidPageAlignedPhyAddr(vm->vmcs_linkptr)) {
      *qualification = (Bit64u) VMENTER_ERR_GUEST_STATE_LINK_POINTER;
      BX_ERROR(("VMFAIL: VMCS link pointer malformed"));
      return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
    }

    Bit32u revision = VMXReadRevisionID((bx_phy_address) vm->vmcs_linkptr);
    if (vm->vmexec_ctrls3 & VMX_VM_EXEC_CTRL3_VMCS_SHADOWING) {
      if ((revision & BX_VMCS_SHADOW_BIT_MASK) == 0) {
        *qualification = (Bit64u) VMENTER_ERR_GUEST_STATE_LINK_POINTER;
        BX_ERROR(("VMFAIL: VMCS link pointer must indicate shadow VMCS revision ID = %d", revision));
        return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
      }
      revision &= ~BX_VMCS_SHADOW_BIT_MASK;
    }
    if (revision != BX_CPU_THIS_PTR vmcs_map->get_vmcs_revision_id()) {
      *qualification = (Bit64u) VMENTER_ERR_GUEST_STATE_LINK_POINTER;
      BX_ERROR(("VMFAIL: VMCS link pointer incorrect revision ID %d != %d", revision, BX_CPU_THIS_PTR vmcs_map->get_vmcs_revision_id()));
      return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
    }

    if (! BX_CPU_THIS_PTR in_smm || (vmentry_ctrls & VMX_VMENTRY_CTRL1_SMM_ENTER) != 0) {
      if (vm->vmcs_linkptr == BX_CPU_THIS_PTR vmcsptr) {
        *qualification = (Bit64u) VMENTER_ERR_GUEST_STATE_LINK_POINTER;
        BX_ERROR(("VMFAIL: VMCS link pointer equal to current VMCS pointer"));
        return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
      }
    }
    else {
      if (vm->vmcs_linkptr == BX_CPU_THIS_PTR vmxonptr) {
        *qualification = (Bit64u) VMENTER_ERR_GUEST_STATE_LINK_POINTER;
        BX_ERROR(("VMFAIL: VMCS link pointer equal to VMXON pointer"));
        return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
      }
    }
  }

  guest.tmpDR6 = (Bit32u) VMread_natural(VMCS_GUEST_PENDING_DBG_EXCEPTIONS);
  if (guest.tmpDR6 & BX_CONST64(0xFFFFFFFFFFFFAFF0)) {
    BX_ERROR(("VMENTER FAIL: VMCS guest tmpDR6 reserved bits"));
    return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
  }
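  // Judging from the reserved-bits mask above, the only bits accepted in the
  // pending debug exceptions field are 3:0 (B0..B3), bit 12 and bit 14 (BS);
  // everything else is treated as reserved here.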

  guest.activity_state = VMread32(VMCS_32BIT_GUEST_ACTIVITY_STATE);
  if (guest.activity_state > BX_VMX_LAST_ACTIVITY_STATE) {
    BX_ERROR(("VMENTER FAIL: VMCS guest activity state %d", guest.activity_state));
    return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
  }

  if (guest.activity_state == BX_ACTIVITY_STATE_HLT) {
    if (guest.sregs[BX_SEG_REG_SS].cache.dpl != 0) {
      BX_ERROR(("VMENTER FAIL: VMCS guest HLT state with SS.DPL=%d", guest.sregs[BX_SEG_REG_SS].cache.dpl));
      return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
    }
  }

  guest.interruptibility_state = VMread32(VMCS_32BIT_GUEST_INTERRUPTIBILITY_STATE);
  if (guest.interruptibility_state & ~BX_VMX_INTERRUPTIBILITY_STATE_MASK) {
    BX_ERROR(("VMENTER FAIL: VMCS guest interruptibility state broken"));
    return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
  }

  if (guest.interruptibility_state & 0x3) {
    if (guest.activity_state != BX_ACTIVITY_STATE_ACTIVE) {
      BX_ERROR(("VMENTER FAIL: VMCS guest interruptibility state broken when entering non active CPU state %d", guest.activity_state));
      return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
    }
  }

  if ((guest.interruptibility_state & BX_VMX_INTERRUPTS_BLOCKED_BY_STI) &&
      (guest.interruptibility_state & BX_VMX_INTERRUPTS_BLOCKED_BY_MOV_SS))
  {
    BX_ERROR(("VMENTER FAIL: VMCS guest interruptibility state broken"));
    return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
  }

  if ((guest.rflags & EFlagsIFMask) == 0) {
    if (guest.interruptibility_state & BX_VMX_INTERRUPTS_BLOCKED_BY_STI) {
      BX_ERROR(("VMENTER FAIL: VMCS guest interrupts can't be blocked by STI when EFLAGS.IF = 0"));
      return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
    }
  }

  if (VMENTRY_INJECTING_EVENT(vm->vmentry_interr_info)) {
    unsigned event_type = (vm->vmentry_interr_info >> 8) & 7;
    unsigned vector = vm->vmentry_interr_info & 0xff;
    if (event_type == BX_EXTERNAL_INTERRUPT) {
      if ((guest.interruptibility_state & 0x3) != 0 || (guest.rflags & EFlagsIFMask) == 0) {
        BX_ERROR(("VMENTER FAIL: VMCS guest interrupts blocked when injecting external interrupt"));
        return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
      }
    }
    if (event_type == BX_NMI) {
      if ((guest.interruptibility_state & 0x3) != 0) {
        BX_ERROR(("VMENTER FAIL: VMCS guest interrupts blocked when injecting NMI"));
        return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
      }
    }
    if (guest.activity_state == BX_ACTIVITY_STATE_WAIT_FOR_SIPI) {
      BX_ERROR(("VMENTER FAIL: No guest interruptions are allowed when entering Wait-For-SIPI state"));
      return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
    }
    if (guest.activity_state == BX_ACTIVITY_STATE_SHUTDOWN && event_type != BX_NMI && vector != BX_MC_EXCEPTION) {
      BX_ERROR(("VMENTER FAIL: Only NMI or #MC guest interruption is allowed when entering shutdown state"));
      return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
    }
  }

  if (vmentry_ctrls & VMX_VMENTRY_CTRL1_SMM_ENTER) {
    if (! (guest.interruptibility_state & BX_VMX_INTERRUPTS_BLOCKED_SMI_BLOCKED)) {
      BX_ERROR(("VMENTER FAIL: VMCS SMM guest should block SMI"));
      return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
    }

    if (guest.activity_state == BX_ACTIVITY_STATE_WAIT_FOR_SIPI) {
      BX_ERROR(("VMENTER FAIL: The activity state must not indicate the wait-for-SIPI state when entering an SMM guest"));
      return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
    }
  }

  if (guest.interruptibility_state & BX_VMX_INTERRUPTS_BLOCKED_SMI_BLOCKED) {
    if (! BX_CPU_THIS_PTR in_smm) {
      BX_ERROR(("VMENTER FAIL: VMCS SMI blocked when not in SMM mode"));
      return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
    }
  }

  if (! x86_64_guest && (guest.cr4 & BX_CR4_PAE_MASK) != 0 && (guest.cr0 & BX_CR0_PG_MASK) != 0) {
#if BX_SUPPORT_VMX >= 2
    if (vm->vmexec_ctrls3 & VMX_VM_EXEC_CTRL3_EPT_ENABLE) {
      for (n=0;n<4;n++)
        guest.pdptr[n] = VMread64(VMCS_64BIT_GUEST_IA32_PDPTE0 + 2*n);

      if (! CheckPDPTR(guest.pdptr)) {
        *qualification = VMENTER_ERR_GUEST_STATE_PDPTR_LOADING;
        BX_ERROR(("VMENTER: EPT Guest State PDPTRs Checks Failed"));
        return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
      }
    }
    else
#endif
    {
      if (! CheckPDPTR(guest.cr3)) {
        *qualification = VMENTER_ERR_GUEST_STATE_PDPTR_LOADING;
        BX_ERROR(("VMENTER: Guest State PDPTRs Checks Failed"));
        return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
      }
    }
  }

  //
  // Load Guest State -> VMENTER
  //

#if BX_SUPPORT_X86_64
#if BX_SUPPORT_VMX >= 2
  // modify EFER.LMA / EFER.LME before setting CR4

  // It is recommended that 64-bit VMM software use the 1-settings of the "load IA32_EFER"
  // VM entry control and the "save IA32_EFER" VM-exit control. If VMentry is establishing
  // CR0.PG=0 and if the "IA-32e mode guest" and "load IA32_EFER" VM entry controls are
  // both 0, VM entry leaves IA32_EFER.LME unmodified (i.e., the host value will persist
  // in the guest) -- Quote from Intel SDM
  if (vmentry_ctrls & VMX_VMENTRY_CTRL1_LOAD_EFER_MSR) {
    BX_CPU_THIS_PTR efer.set32((Bit32u) guest.efer_msr);
  }
  else
#endif
  {
    if (x86_64_guest) {
      BX_CPU_THIS_PTR efer.set32(BX_CPU_THIS_PTR efer.get32() | (BX_EFER_LME_MASK | BX_EFER_LMA_MASK));
    }
    else {
      // when loading unrestricted guest with CR0.PG=0 EFER.LME is unmodified
      // (i.e., the host value will persist in the guest)
      if (guest.cr0 & BX_CR0_PG_MASK)
        BX_CPU_THIS_PTR efer.set32(BX_CPU_THIS_PTR efer.get32() & ~(BX_EFER_LME_MASK | BX_EFER_LMA_MASK));
      else
        BX_CPU_THIS_PTR efer.set32(BX_CPU_THIS_PTR efer.get32() & ~BX_EFER_LMA_MASK);
    }
  }
#endif

  // keep bits ET(4), reserved bits 15:6, 17, 28:19, NW(29), CD(30)
#define VMX_KEEP_CR0_BITS 0x7FFAFFD0
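// Mask breakdown for the definition above:
//   0x7FFAFFD0 = bit 4 (0x10) | bits 15:6 (0xFFC0) | bit 17 (0x20000)
//              | bits 28:19 (0x1FF80000) | bit 29 (0x20000000) | bit 30 (0x40000000)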

  guest.cr0 = (BX_CPU_THIS_PTR cr0.get32() & VMX_KEEP_CR0_BITS) | (guest.cr0 & ~VMX_KEEP_CR0_BITS);

  if (! check_CR0(guest.cr0)) {
    BX_PANIC(("VMENTER CR0 is broken !"));
  }
  if (! check_CR4(guest.cr4)) {
    BX_PANIC(("VMENTER CR4 is broken !"));
  }

  BX_CPU_THIS_PTR cr0.set32((Bit32u) guest.cr0);
  BX_CPU_THIS_PTR cr4.set32((Bit32u) guest.cr4);
  BX_CPU_THIS_PTR cr3 = guest.cr3;

#if BX_SUPPORT_VMX >= 2
  if (vm->vmexec_ctrls3 & VMX_VM_EXEC_CTRL3_EPT_ENABLE) {
    // load PDPTR only in PAE legacy mode
    if (BX_CPU_THIS_PTR cr0.get_PG() && BX_CPU_THIS_PTR cr4.get_PAE() && !x86_64_guest) {
      for (n = 0; n < 4; n++)
        BX_CPU_THIS_PTR PDPTR_CACHE.entry[n] = guest.pdptr[n];
    }
  }
#endif

  if (vmentry_ctrls & VMX_VMENTRY_CTRL1_LOAD_DBG_CTRLS) {
    // always clear bits 15:14 and set bit 10
    BX_CPU_THIS_PTR dr7.set32((guest.dr7 & ~0xc000) | 0x400);
  }

  RIP = BX_CPU_THIS_PTR prev_rip = guest.rip;
  RSP = guest.rsp;

  BX_CPU_THIS_PTR async_event = 0;

  setEFlags((Bit32u) guest.rflags);

#ifdef BX_SUPPORT_CS_LIMIT_DEMOTION
  // Handle special case of CS.LIMIT demotion (new descriptor limit is
  // smaller than current one)
  if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled > guest.sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled)
    BX_CPU_THIS_PTR iCache.flushICacheEntries();
#endif

  for(unsigned segreg=0; segreg<6; segreg++)
    BX_CPU_THIS_PTR sregs[segreg] = guest.sregs[segreg];

  BX_CPU_THIS_PTR gdtr.base = gdtr_base;
  BX_CPU_THIS_PTR gdtr.limit = gdtr_limit;
  BX_CPU_THIS_PTR idtr.base = idtr_base;
  BX_CPU_THIS_PTR idtr.limit = idtr_limit;

  BX_CPU_THIS_PTR ldtr = guest.ldtr;
  BX_CPU_THIS_PTR tr = guest.tr;

  BX_CPU_THIS_PTR msr.sysenter_esp_msr = guest.sysenter_esp_msr;
  BX_CPU_THIS_PTR msr.sysenter_eip_msr = guest.sysenter_eip_msr;
  BX_CPU_THIS_PTR msr.sysenter_cs_msr = guest.sysenter_cs_msr;

#if BX_SUPPORT_VMX >= 2
  if (vmentry_ctrls & VMX_VMENTRY_CTRL1_LOAD_PAT_MSR) {
    BX_CPU_THIS_PTR msr.pat = guest.pat_msr;
  }
  vm->ple.last_pause_time = vm->ple.first_pause_time = 0;
#endif

  //
  // Load Guest Non-Registers State -> VMENTER
  //

  if (vm->vmentry_ctrls & VMX_VMENTRY_CTRL1_SMM_ENTER)
    BX_PANIC(("VMENTER: entry to SMM is not implemented yet !"));

  if (VMENTRY_INJECTING_EVENT(vm->vmentry_interr_info)) {
    // the VMENTRY injecting event to the guest
    BX_CPU_THIS_PTR inhibit_mask = 0; // do not block interrupts
    BX_CPU_THIS_PTR debug_trap = 0;
    guest.activity_state = BX_ACTIVITY_STATE_ACTIVE;
  }
  else {
    if (guest.tmpDR6 & (1 << 12))
      BX_CPU_THIS_PTR debug_trap = guest.tmpDR6 & 0x0000400F;
    else
      BX_CPU_THIS_PTR debug_trap = guest.tmpDR6 & 0x00004000;
    if (BX_CPU_THIS_PTR debug_trap) {
      BX_CPU_THIS_PTR debug_trap |= BX_DEBUG_TRAP_HIT;
      BX_CPU_THIS_PTR async_event = 1;
    }

    if (guest.interruptibility_state & BX_VMX_INTERRUPTS_BLOCKED_BY_STI)
      inhibit_interrupts(BX_INHIBIT_INTERRUPTS);
    else if (guest.interruptibility_state & BX_VMX_INTERRUPTS_BLOCKED_BY_MOV_SS)
      inhibit_interrupts(BX_INHIBIT_INTERRUPTS_BY_MOVSS);
    else
      BX_CPU_THIS_PTR inhibit_mask = 0;
  }

  if (guest.interruptibility_state & BX_VMX_INTERRUPTS_BLOCKED_NMI_BLOCKED) {
    if (vm->vmexec_ctrls1 & VMX_VM_EXEC_CTRL1_VIRTUAL_NMI)
      mask_event(BX_EVENT_VMX_VIRTUAL_NMI);
    else
      mask_event(BX_EVENT_NMI);
  }

  if (vm->vmexec_ctrls2 & VMX_VM_EXEC_CTRL2_NMI_WINDOW_EXITING)
    signal_event(BX_EVENT_VMX_VIRTUAL_NMI);

  if (vm->vmexec_ctrls2 & VMX_VM_EXEC_CTRL2_INTERRUPT_WINDOW_VMEXIT)
    signal_event(BX_EVENT_VMX_INTERRUPT_WINDOW_EXITING);

  handleCpuContextChange();

#if BX_SUPPORT_MONITOR_MWAIT
  BX_CPU_THIS_PTR monitor.reset_monitor();
#endif

  BX_INSTR_TLB_CNTRL(BX_CPU_ID, BX_INSTR_CONTEXT_SWITCH, 0);

  if (guest.activity_state) {
    BX_DEBUG(("VMEntry to non-active CPU state %d", guest.activity_state));
    enter_sleep_state(guest.activity_state);
  }

  return VMXERR_NO_ERROR;
}

void BX_CPU_C::VMenterInjectEvents(void)
{
  VMCS_CACHE *vm = &BX_CPU_THIS_PTR vmcs;

  if (! VMENTRY_INJECTING_EVENT(vm->vmentry_interr_info))
    return;

  /* the VMENTRY injecting event to the guest */
  unsigned vector = vm->vmentry_interr_info & 0xff;
  unsigned type = (vm->vmentry_interr_info >> 8) & 7;
  unsigned push_error = vm->vmentry_interr_info & (1 << 11);
  unsigned error_code = push_error ? vm->vmentry_excep_err_code : 0;

  if (type == 7) {
    if (BX_SUPPORT_VMX_EXTENSION(BX_VMX_MONITOR_TRAP_FLAG)) {
      signal_event(BX_EVENT_VMX_MONITOR_TRAP_FLAG);
      return;
    }
  }

  bx_bool is_INT = 0;
  switch(type) {
    case BX_EXTERNAL_INTERRUPT:
    case BX_HARDWARE_EXCEPTION:
      BX_CPU_THIS_PTR EXT = 1;
      break;

    case BX_NMI:
      if (vm->vmexec_ctrls1 & VMX_VM_EXEC_CTRL1_VIRTUAL_NMI)
        mask_event(BX_EVENT_VMX_VIRTUAL_NMI);
      else
        mask_event(BX_EVENT_NMI);

      BX_CPU_THIS_PTR EXT = 1;
      break;

    case BX_PRIVILEGED_SOFTWARE_INTERRUPT:
      BX_CPU_THIS_PTR EXT = 1;
      is_INT = 1;
      break;

    case BX_SOFTWARE_INTERRUPT:
    case BX_SOFTWARE_EXCEPTION:
      is_INT = 1;
      break;

    default:
      BX_PANIC(("VMENTER: unsupported event injection type %d !", type));
  }

  // keep prev_rip value/unwind in case of event delivery failure
  if (is_INT)
    RIP += vm->vmentry_instr_length;

  BX_DEBUG(("VMENTER: Injecting vector 0x%02x (error_code 0x%04x)", vector, error_code));

  if (type == BX_HARDWARE_EXCEPTION) {
    // record exception the same way as BX_CPU_C::exception does
    BX_ASSERT(vector < BX_CPU_HANDLED_EXCEPTIONS);
    BX_CPU_THIS_PTR last_exception_type = exceptions_info[vector].exception_type;
  }

  vm->idt_vector_info = vm->vmentry_interr_info & ~0x80000000;
  vm->idt_vector_error_code = error_code;

  interrupt(vector, type, push_error, error_code);

  BX_CPU_THIS_PTR last_exception_type = 0; // error resolved
}
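// The MSR-load/store area walked by LoadMSRs()/StoreMSRs() below is laid out
// as 16-byte entries: the first quadword holds the MSR index in its low 32
// bits (the upper 32 bits must be zero), the second quadword holds the value.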

Bit32u BX_CPU_C::LoadMSRs(Bit32u msr_cnt, bx_phy_address pAddr)
{
  Bit64u msr_lo, msr_hi;

  for (Bit32u msr = 1; msr <= msr_cnt; msr++) {
    access_read_physical(pAddr, 8, &msr_lo);
    BX_NOTIFY_PHY_MEMORY_ACCESS(pAddr, 8, MEMTYPE(resolve_memtype(pAddr)), BX_READ, BX_VMX_LOAD_MSR_ACCESS, (Bit8u*)(&msr_lo));
    access_read_physical(pAddr + 8, 8, &msr_hi);
    BX_NOTIFY_PHY_MEMORY_ACCESS(pAddr + 8, 8, MEMTYPE(resolve_memtype(pAddr)), BX_READ, BX_VMX_LOAD_MSR_ACCESS, (Bit8u*)(&msr_hi));

    if (GET32H(msr_lo))
      return msr;

    Bit32u index = GET32L(msr_lo);

#if BX_SUPPORT_X86_64
    if (index == BX_MSR_FSBASE || index == BX_MSR_GSBASE)
      return msr;
#endif

    if (is_cpu_extension_supported(BX_ISA_X2APIC)) {
      if ((index & 0xfffff800) == 0x800) // X2APIC range
        return msr;
    }

    if (! wrmsr(index, msr_hi))
      return msr;

    pAddr += 16; // to next MSR
  }

  return 0;
}

Bit32u BX_CPU_C::StoreMSRs(Bit32u msr_cnt, bx_phy_address pAddr)
{
  Bit64u msr_lo, msr_hi;

  for (Bit32u msr = 1; msr <= msr_cnt; msr++) {
    access_read_physical(pAddr, 8, &msr_lo);
    BX_NOTIFY_PHY_MEMORY_ACCESS(pAddr, 8, MEMTYPE(resolve_memtype(pAddr)),
            BX_READ, BX_VMX_STORE_MSR_ACCESS, (Bit8u*)(&msr_lo));

    if (GET32H(msr_lo))
      return msr;

    Bit32u index = GET32L(msr_lo);

    if (is_cpu_extension_supported(BX_ISA_X2APIC)) {
      if ((index & 0xfffff800) == 0x800) // X2APIC range
        return msr;
    }

    if (! rdmsr(index, &msr_hi))
      return msr;

    access_write_physical(pAddr + 8, 8, &msr_hi);
    BX_NOTIFY_PHY_MEMORY_ACCESS(pAddr + 8, 8, MEMTYPE(resolve_memtype(pAddr)),
            BX_WRITE, BX_VMX_STORE_MSR_ACCESS, (Bit8u*)(&msr_hi));

    pAddr += 16; // to next MSR
  }

  return 0;
}

////////////////////////////////////////////////////////////
// VMexit
////////////////////////////////////////////////////////////

void BX_CPU_C::VMexitSaveGuestState(void)
{
  VMCS_CACHE *vm = &BX_CPU_THIS_PTR vmcs;
  int n;

  VMwrite_natural(VMCS_GUEST_CR0, BX_CPU_THIS_PTR cr0.get32());
  VMwrite_natural(VMCS_GUEST_CR3, BX_CPU_THIS_PTR cr3);
  VMwrite_natural(VMCS_GUEST_CR4, BX_CPU_THIS_PTR cr4.get32());

#if BX_SUPPORT_VMX >= 2
  if (vm->vmexec_ctrls3 & VMX_VM_EXEC_CTRL3_EPT_ENABLE) {
    // save only if guest running in legacy PAE mode
    if (BX_CPU_THIS_PTR cr0.get_PG() && BX_CPU_THIS_PTR cr4.get_PAE() && !long_mode()) {
      for(n=0; n<4; n++) {
        VMwrite64(VMCS_64BIT_GUEST_IA32_PDPTE0 + 2*n, BX_CPU_THIS_PTR PDPTR_CACHE.entry[n]);
      }
    }
  }
#endif

  if (vm->vmexit_ctrls & VMX_VMEXIT_CTRL1_SAVE_DBG_CTRLS)
    VMwrite_natural(VMCS_GUEST_DR7, BX_CPU_THIS_PTR dr7.get32());

  VMwrite_natural(VMCS_GUEST_RIP, RIP);
  VMwrite_natural(VMCS_GUEST_RSP, RSP);
  VMwrite_natural(VMCS_GUEST_RFLAGS, read_eflags());

  for (n=0; n<6; n++) {
    Bit32u selector = BX_CPU_THIS_PTR sregs[n].selector.value;
    bx_bool invalid = !BX_CPU_THIS_PTR sregs[n].cache.valid;
    bx_address base = BX_CPU_THIS_PTR sregs[n].cache.u.segment.base;
    Bit32u limit = BX_CPU_THIS_PTR sregs[n].cache.u.segment.limit_scaled;
    Bit32u ar = (get_descriptor_h(&BX_CPU_THIS_PTR sregs[n].cache) & 0x00f0ff00) >> 8;
    ar = vmx_from_ar_byte_wr(ar | (invalid << 16));

    VMwrite16(VMCS_16BIT_GUEST_ES_SELECTOR + 2*n, selector);
    VMwrite32(VMCS_32BIT_GUEST_ES_ACCESS_RIGHTS + 2*n, ar);
    VMwrite_natural(VMCS_GUEST_ES_BASE + 2*n, base);
    VMwrite32(VMCS_32BIT_GUEST_ES_LIMIT + 2*n, limit);
  }

  // save guest LDTR
  Bit32u ldtr_selector = BX_CPU_THIS_PTR ldtr.selector.value;
  bx_bool ldtr_invalid = !BX_CPU_THIS_PTR ldtr.cache.valid;
  bx_address ldtr_base = BX_CPU_THIS_PTR ldtr.cache.u.segment.base;
  Bit32u ldtr_limit = BX_CPU_THIS_PTR ldtr.cache.u.segment.limit_scaled;
  Bit32u ldtr_ar = (get_descriptor_h(&BX_CPU_THIS_PTR ldtr.cache) & 0x00f0ff00) >> 8;
  ldtr_ar = vmx_from_ar_byte_wr(ldtr_ar | (ldtr_invalid << 16));

  VMwrite16(VMCS_16BIT_GUEST_LDTR_SELECTOR, ldtr_selector);
  VMwrite32(VMCS_32BIT_GUEST_LDTR_ACCESS_RIGHTS, ldtr_ar);
  VMwrite_natural(VMCS_GUEST_LDTR_BASE, ldtr_base);
  VMwrite32(VMCS_32BIT_GUEST_LDTR_LIMIT, ldtr_limit);

  // save guest TR
  Bit32u tr_selector = BX_CPU_THIS_PTR tr.selector.value;
  bx_bool tr_invalid = !BX_CPU_THIS_PTR tr.cache.valid;
  bx_address tr_base = BX_CPU_THIS_PTR tr.cache.u.segment.base;
  Bit32u tr_limit = BX_CPU_THIS_PTR tr.cache.u.segment.limit_scaled;
  Bit32u tr_ar = (get_descriptor_h(&BX_CPU_THIS_PTR tr.cache) & 0x00f0ff00) >> 8;
  tr_ar = vmx_from_ar_byte_wr(tr_ar | (tr_invalid << 16));

  VMwrite16(VMCS_16BIT_GUEST_TR_SELECTOR, tr_selector);
  VMwrite32(VMCS_32BIT_GUEST_TR_ACCESS_RIGHTS, tr_ar);
  VMwrite_natural(VMCS_GUEST_TR_BASE, tr_base);
  VMwrite32(VMCS_32BIT_GUEST_TR_LIMIT, tr_limit);

  VMwrite_natural(VMCS_GUEST_GDTR_BASE, BX_CPU_THIS_PTR gdtr.base);
  VMwrite32(VMCS_32BIT_GUEST_GDTR_LIMIT, BX_CPU_THIS_PTR gdtr.limit);
  VMwrite_natural(VMCS_GUEST_IDTR_BASE, BX_CPU_THIS_PTR idtr.base);
  VMwrite32(VMCS_32BIT_GUEST_IDTR_LIMIT, BX_CPU_THIS_PTR idtr.limit);

  VMwrite_natural(VMCS_GUEST_IA32_SYSENTER_ESP_MSR, BX_CPU_THIS_PTR msr.sysenter_esp_msr);
  VMwrite_natural(VMCS_GUEST_IA32_SYSENTER_EIP_MSR, BX_CPU_THIS_PTR msr.sysenter_eip_msr);
  VMwrite32(VMCS_32BIT_GUEST_IA32_SYSENTER_CS_MSR, BX_CPU_THIS_PTR msr.sysenter_cs_msr);

#if BX_SUPPORT_VMX >= 2
  if (vm->vmexit_ctrls & VMX_VMEXIT_CTRL1_STORE_PAT_MSR)
    VMwrite64(VMCS_64BIT_GUEST_IA32_PAT, BX_CPU_THIS_PTR msr.pat.u64);
#if BX_SUPPORT_X86_64
  if (vm->vmexit_ctrls & VMX_VMEXIT_CTRL1_STORE_EFER_MSR)
    VMwrite64(VMCS_64BIT_GUEST_IA32_EFER, BX_CPU_THIS_PTR efer.get32());
#endif
#endif

  Bit32u tmpDR6 = BX_CPU_THIS_PTR debug_trap & 0x0000400f;
  if (tmpDR6 & 0xf) tmpDR6 |= (1 << 12);
  VMwrite_natural(VMCS_GUEST_PENDING_DBG_EXCEPTIONS, tmpDR6);

  // effectively wakeup from MWAIT state on VMEXIT
  if (BX_CPU_THIS_PTR activity_state >= BX_VMX_LAST_ACTIVITY_STATE)
    VMwrite32(VMCS_32BIT_GUEST_ACTIVITY_STATE, BX_ACTIVITY_STATE_ACTIVE);
  else
    VMwrite32(VMCS_32BIT_GUEST_ACTIVITY_STATE, BX_CPU_THIS_PTR activity_state);

  Bit32u interruptibility_state = 0;
  if (interrupts_inhibited(BX_INHIBIT_INTERRUPTS)) {
    if (interrupts_inhibited(BX_INHIBIT_DEBUG))
      interruptibility_state |= BX_VMX_INTERRUPTS_BLOCKED_BY_MOV_SS;
    else
      interruptibility_state |= BX_VMX_INTERRUPTS_BLOCKED_BY_STI;
  }

  if (is_masked_event(BX_EVENT_SMI))
    interruptibility_state |= BX_VMX_INTERRUPTS_BLOCKED_SMI_BLOCKED;

  if (vm->vmexec_ctrls1 & VMX_VM_EXEC_CTRL1_VIRTUAL_NMI) {
    if (is_masked_event(BX_EVENT_VMX_VIRTUAL_NMI))
      interruptibility_state |= BX_VMX_INTERRUPTS_BLOCKED_NMI_BLOCKED;
  }
  else {
    if (is_masked_event(BX_EVENT_NMI))
      interruptibility_state |= BX_VMX_INTERRUPTS_BLOCKED_NMI_BLOCKED;
  }

  VMwrite32(VMCS_32BIT_GUEST_INTERRUPTIBILITY_STATE, interruptibility_state);

#if BX_SUPPORT_VMX >= 2
  if (VMX_MSR_MISC & VMX_MISC_STORE_LMA_TO_X86_64_GUEST_VMENTRY_CONTROL) {
    // VMEXITs store the value of EFER.LMA into the "x86-64 guest" VMENTRY control
    // must be set if unrestricted guest is supported
    if (long_mode())
      vm->vmentry_ctrls |=  VMX_VMENTRY_CTRL1_X86_64_GUEST;
    else
      vm->vmentry_ctrls &= ~VMX_VMENTRY_CTRL1_X86_64_GUEST;

    VMwrite32(VMCS_32BIT_CONTROL_VMENTRY_CONTROLS, vm->vmentry_ctrls);
  }

  // Deactivate VMX preemption timer
  BX_CPU_THIS_PTR lapic.deactivate_vmx_preemption_timer();
  clear_event(BX_EVENT_VMX_PREEMPTION_TIMER_EXPIRED);
  // Store back to VMCS
  if (vm->vmexit_ctrls & VMX_VMEXIT_CTRL1_STORE_VMX_PREEMPTION_TIMER)
    VMwrite32(VMCS_32BIT_GUEST_PREEMPTION_TIMER_VALUE, BX_CPU_THIS_PTR lapic.read_vmx_preemption_timer());

  if (vm->vmexec_ctrls3 & VMX_VM_EXEC_CTRL3_VIRTUAL_INT_DELIVERY) {
    VMwrite16(VMCS_16BIT_GUEST_INTERRUPT_STATUS, (((Bit16u) vm->svi) << 8) | vm->rvi);
  }

  if (vm->vmexec_ctrls3 & VMX_VM_EXEC_CTRL3_PML_ENABLE) {
    VMwrite16(VMCS_16BIT_GUEST_PML_INDEX, vm->pml_index);
  }
#endif
}
void BX_CPU_C::VMexitLoadHostState(void)
|
|
|
|
|
{
|
|
|
|
|
VMCS_HOST_STATE *host_state = &BX_CPU_THIS_PTR vmcs.host_state;
|
|
|
|
|
bx_bool x86_64_host = 0;
|
|
|
|
|
Bit32u vmexit_ctrls = BX_CPU_THIS_PTR vmcs.vmexit_ctrls;
|
2010-04-02 12:03:04 +04:00
|
|
|
|
|
2011-12-25 23:53:23 +04:00
|
|
|
|
BX_CPU_THIS_PTR tsc_offset = 0;
|
|
|
|
|
|
2010-04-02 12:03:04 +04:00
|
|
|
|
#if BX_SUPPORT_X86_64
|
2009-01-31 13:43:24 +03:00
|
|
|
|
if (vmexit_ctrls & VMX_VMEXIT_CTRL1_HOST_ADDR_SPACE_SIZE) {
|
|
|
|
|
BX_DEBUG(("VMEXIT to x86-64 host"));
|
|
|
|
|
x86_64_host = 1;
|
|
|
|
|
}
|
|
|
|
|
|
2010-04-03 11:30:23 +04:00
|
|
|
|
#if BX_SUPPORT_VMX >= 2
|
2012-03-14 23:17:27 +04:00
|
|
|
|
// modify EFER.LMA / EFER.LME before setting CR4
|
2010-03-27 12:27:40 +03:00
|
|
|
|
if (vmexit_ctrls & VMX_VMEXIT_CTRL1_LOAD_EFER_MSR) {
|
2011-12-10 22:58:25 +04:00
|
|
|
|
BX_CPU_THIS_PTR efer.set32((Bit32u) host_state->efer_msr);
|
2010-03-27 12:27:40 +03:00
|
|
|
|
}
|
2010-04-03 11:30:23 +04:00
|
|
|
|
else
|
|
|
|
|
#endif
|
|
|
|
|
{
|
2010-03-27 12:27:40 +03:00
|
|
|
|
if (x86_64_host)
|
2011-08-11 02:04:33 +04:00
|
|
|
|
BX_CPU_THIS_PTR efer.set32(BX_CPU_THIS_PTR efer.get32() | (BX_EFER_LME_MASK | BX_EFER_LMA_MASK));
|
2010-03-27 12:27:40 +03:00
|
|
|
|
else
|
2011-08-11 02:04:33 +04:00
|
|
|
|
BX_CPU_THIS_PTR efer.set32(BX_CPU_THIS_PTR efer.get32() & ~(BX_EFER_LME_MASK | BX_EFER_LMA_MASK));
|
2010-03-27 12:27:40 +03:00
|
|
|
|
}
|
2009-01-31 13:43:24 +03:00
|
|
|
|
#endif
|
|
|
|
|
|
|
|
|
|
// ET, CD, NW, bits 28:19, 17, 15:6 and the VMX fixed bits are not modified (Section 19.8)
|
2011-12-26 20:33:13 +04:00
|
|
|
|
host_state->cr0 = (BX_CPU_THIS_PTR cr0.get32() & VMX_KEEP_CR0_BITS) | (host_state->cr0 & ~VMX_KEEP_CR0_BITS);
|
2010-03-26 00:33:07 +03:00
|
|
|
|
|
|
|
|
|
if (! check_CR0(host_state->cr0)) {
|
2009-01-31 13:43:24 +03:00
|
|
|
|
BX_PANIC(("VMEXIT CR0 is broken !"));
|
|
|
|
|
}
|
2010-03-26 00:33:07 +03:00
|
|
|
|
if (! check_CR4(host_state->cr4)) {
|
2009-01-31 13:43:24 +03:00
|
|
|
|
BX_PANIC(("VMEXIT CR4 is broken !"));
|
|
|
|
|
}
|
2010-03-26 00:33:07 +03:00
|
|
|
|
|
2011-12-10 22:58:25 +04:00
|
|
|
|
BX_CPU_THIS_PTR cr0.set32((Bit32u) host_state->cr0);
|
|
|
|
|
BX_CPU_THIS_PTR cr4.set32((Bit32u) host_state->cr4);
|
2010-04-04 13:04:12 +04:00
|
|
|
|
BX_CPU_THIS_PTR cr3 = host_state->cr3;
|
2010-03-26 00:33:07 +03:00
|
|
|
|
|
2010-04-03 23:18:38 +04:00
|
|
|
|
if (! x86_64_host && BX_CPU_THIS_PTR cr4.get_PAE()) {
|
|
|
|
|
if (! CheckPDPTR(host_state->cr3)) {
|
|
|
|
|
BX_ERROR(("VMABORT: host PDPTRs are corrupted !"));
|
|
|
|
|
VMabort(VMABORT_HOST_PDPTR_CORRUPTED);
|
|
|
|
|
}
|
2010-03-26 00:33:07 +03:00
|
|
|
|
}
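// A PAE host (CR4.PAE set, not x86-64) must have valid PDPTEs at host CR3;
// a corrupted set cannot be reflected to any guest, so it triggers VMABORT.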
|
2009-01-31 13:43:24 +03:00
|
|
|
|
|
2011-03-15 23:20:15 +03:00
|
|
|
|
BX_CPU_THIS_PTR dr7.set32(0x00000400);
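// DR7 is loaded with its reset value (0x00000400) so no guest breakpoints stay armed in the host.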
|
2009-01-31 13:43:24 +03:00
|
|
|
|
|
|
|
|
|
BX_CPU_THIS_PTR msr.sysenter_cs_msr = host_state->sysenter_cs_msr;
|
|
|
|
|
BX_CPU_THIS_PTR msr.sysenter_esp_msr = host_state->sysenter_esp_msr;
|
|
|
|
|
BX_CPU_THIS_PTR msr.sysenter_eip_msr = host_state->sysenter_eip_msr;
|
|
|
|
|
|
2010-04-03 11:30:23 +04:00
|
|
|
|
#if BX_SUPPORT_VMX >= 2
|
2010-03-27 00:26:08 +03:00
|
|
|
|
if (vmexit_ctrls & VMX_VMEXIT_CTRL1_LOAD_PAT_MSR) {
|
|
|
|
|
BX_CPU_THIS_PTR msr.pat = host_state->pat_msr;
|
|
|
|
|
}
|
2010-04-03 11:30:23 +04:00
|
|
|
|
#endif
|
2010-03-27 00:26:08 +03:00
|
|
|
|
|
2009-01-31 13:43:24 +03:00
|
|
|
|
// CS selector loaded from VMCS
|
|
|
|
|
// valid <= 1
|
|
|
|
|
// base <= 0
|
|
|
|
|
// limit <= 0xffffffff, g <= 1
|
|
|
|
|
// present <= 1
|
|
|
|
|
// dpl <= 0
|
|
|
|
|
// type <= segment, BX_CODE_EXEC_READ_ACCESSED
|
|
|
|
|
// d_b <= loaded from 'host-address space size' VMEXIT control
|
|
|
|
|
// l <= loaded from 'host-address space size' VMEXIT control
|
|
|
|
|
|
|
|
|
|
parse_selector(host_state->segreg_selector[BX_SEG_REG_CS],
|
|
|
|
|
&BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector);
|
|
|
|
|
|
2015-01-25 23:55:10 +03:00
|
|
|
|
BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.valid = SegValidCache | SegAccessROK | SegAccessWOK | SegAccessROK4G | SegAccessWOK4G;
|
2009-01-31 13:43:24 +03:00
|
|
|
|
BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.p = 1;
|
|
|
|
|
BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.dpl = 0;
|
|
|
|
|
BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.segment = 1; /* data/code segment */
|
|
|
|
|
BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.type = BX_CODE_EXEC_READ_ACCESSED;
|
|
|
|
|
|
|
|
|
|
BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.base = 0;
|
|
|
|
|
BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled = 0xffffffff;
|
|
|
|
|
BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.avl = 0;
|
|
|
|
|
BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.g = 1; /* page granular */
|
|
|
|
|
BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.d_b = !x86_64_host;
|
|
|
|
|
#if BX_SUPPORT_X86_64
|
|
|
|
|
BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.l = x86_64_host;
|
|
|
|
|
#endif
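// Host CS is synthesized as a flat ring-0 code segment: base 0, 4G limit,
// with D/L chosen by the "host address-space size" VMEXIT control.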
|
|
|
|
|
|
|
|
|
|
// DATA selector loaded from VMCS
|
|
|
|
|
// valid <= if selector is not all-zero
|
|
|
|
|
// base <= 0
|
|
|
|
|
// limit <= 0xffffffff, g <= 1
|
|
|
|
|
// present <= 1
|
|
|
|
|
// dpl <= 0
|
|
|
|
|
// type <= segment, BX_DATA_READ_WRITE_ACCESSED
|
|
|
|
|
// d_b <= 1
|
|
|
|
|
// l <= 0
|
|
|
|
|
|
|
|
|
|
for (unsigned segreg = 0; segreg < 6; segreg++)
|
|
|
|
|
{
|
|
|
|
|
if (segreg == BX_SEG_REG_CS) continue;
|
|
|
|
|
|
|
|
|
|
parse_selector(host_state->segreg_selector[segreg],
|
|
|
|
|
&BX_CPU_THIS_PTR sregs[segreg].selector);
|
|
|
|
|
|
|
|
|
|
if (! host_state->segreg_selector[segreg]) {
|
|
|
|
|
BX_CPU_THIS_PTR sregs[segreg].cache.valid = 0;
|
|
|
|
|
}
|
|
|
|
|
else {
|
|
|
|
|
BX_CPU_THIS_PTR sregs[segreg].cache.valid = SegValidCache;
|
|
|
|
|
BX_CPU_THIS_PTR sregs[segreg].cache.p = 1;
|
|
|
|
|
BX_CPU_THIS_PTR sregs[segreg].cache.dpl = 0;
|
|
|
|
|
BX_CPU_THIS_PTR sregs[segreg].cache.segment = 1; /* data/code segment */
|
|
|
|
|
BX_CPU_THIS_PTR sregs[segreg].cache.type = BX_DATA_READ_WRITE_ACCESSED;
|
|
|
|
|
BX_CPU_THIS_PTR sregs[segreg].cache.u.segment.base = 0;
|
|
|
|
|
BX_CPU_THIS_PTR sregs[segreg].cache.u.segment.limit_scaled = 0xffffffff;
|
|
|
|
|
BX_CPU_THIS_PTR sregs[segreg].cache.u.segment.avl = 0;
|
|
|
|
|
BX_CPU_THIS_PTR sregs[segreg].cache.u.segment.g = 1; /* page granular */
|
|
|
|
|
BX_CPU_THIS_PTR sregs[segreg].cache.u.segment.d_b = 1;
|
|
|
|
|
#if BX_SUPPORT_X86_64
|
|
|
|
|
BX_CPU_THIS_PTR sregs[segreg].cache.u.segment.l = 0;
|
|
|
|
|
#endif
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SS.DPL always clear
|
|
|
|
|
BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.dpl = 0;
|
|
|
|
|
|
|
|
|
|
if (x86_64_host || BX_CPU_THIS_PTR sregs[BX_SEG_REG_FS].cache.valid)
|
|
|
|
|
BX_CPU_THIS_PTR sregs[BX_SEG_REG_FS].cache.u.segment.base = host_state->fs_base;
|
|
|
|
|
|
|
|
|
|
if (x86_64_host || BX_CPU_THIS_PTR sregs[BX_SEG_REG_GS].cache.valid)
|
|
|
|
|
BX_CPU_THIS_PTR sregs[BX_SEG_REG_GS].cache.u.segment.base = host_state->gs_base;
|
|
|
|
|
|
|
|
|
|
// TR selector loaded from VMCS
|
|
|
|
|
parse_selector(host_state->tr_selector, &BX_CPU_THIS_PTR tr.selector);
|
|
|
|
|
|
2015-01-25 23:55:10 +03:00
|
|
|
|
BX_CPU_THIS_PTR tr.cache.valid = SegValidCache; /* valid */
|
2009-01-31 13:43:24 +03:00
|
|
|
|
BX_CPU_THIS_PTR tr.cache.p = 1; /* present */
|
|
|
|
|
BX_CPU_THIS_PTR tr.cache.dpl = 0; /* field not used */
|
|
|
|
|
BX_CPU_THIS_PTR tr.cache.segment = 0; /* system segment */
|
|
|
|
|
BX_CPU_THIS_PTR tr.cache.type = BX_SYS_SEGMENT_BUSY_386_TSS;
|
2009-04-05 23:09:44 +04:00
|
|
|
|
BX_CPU_THIS_PTR tr.cache.u.segment.base = host_state->tr_base;
|
|
|
|
|
BX_CPU_THIS_PTR tr.cache.u.segment.limit_scaled = 0x67;
|
|
|
|
|
BX_CPU_THIS_PTR tr.cache.u.segment.avl = 0;
|
|
|
|
|
BX_CPU_THIS_PTR tr.cache.u.segment.g = 0; /* byte granular */
|
2009-01-31 13:43:24 +03:00
|
|
|
|
|
|
|
|
|
// unusable LDTR
|
|
|
|
|
BX_CPU_THIS_PTR ldtr.selector.value = 0x0000;
|
|
|
|
|
BX_CPU_THIS_PTR ldtr.selector.index = 0x0000;
|
|
|
|
|
BX_CPU_THIS_PTR ldtr.selector.ti = 0;
|
|
|
|
|
BX_CPU_THIS_PTR ldtr.selector.rpl = 0;
|
|
|
|
|
BX_CPU_THIS_PTR ldtr.cache.valid = 0; /* invalid */
|
|
|
|
|
|
|
|
|
|
BX_CPU_THIS_PTR gdtr.base = host_state->gdtr_base;
|
|
|
|
|
BX_CPU_THIS_PTR gdtr.limit = 0xFFFF;
|
|
|
|
|
|
|
|
|
|
BX_CPU_THIS_PTR idtr.base = host_state->idtr_base;
|
|
|
|
|
BX_CPU_THIS_PTR idtr.limit = 0xFFFF;
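// Only the GDTR/IDTR bases come from the VMCS host-state area; both limits
// are architecturally forced to 0xFFFF on VMEXIT.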
|
|
|
|
|
|
2012-01-02 00:26:23 +04:00
|
|
|
|
RIP = BX_CPU_THIS_PTR prev_rip = host_state->rip;
|
2009-01-31 13:43:24 +03:00
|
|
|
|
RSP = host_state->rsp;
|
|
|
|
|
|
|
|
|
|
BX_CPU_THIS_PTR inhibit_mask = 0;
|
|
|
|
|
BX_CPU_THIS_PTR debug_trap = 0;
|
|
|
|
|
|
2011-09-13 23:38:09 +04:00
|
|
|
|
// set flags directly, avoid setEFlags side effects
|
|
|
|
|
BX_CPU_THIS_PTR eflags = 0x2; // Bit1 is always set
|
|
|
|
|
// Update lazy flags state
|
2014-10-22 21:49:12 +04:00
|
|
|
|
clearEFlagsOSZAPC();
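// Lazy-flag state must match the freshly loaded RFLAGS value of 2, i.e. all status flags clear.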
|
2009-01-31 13:43:24 +03:00
|
|
|
|
|
2013-04-09 19:43:15 +04:00
|
|
|
|
BX_CPU_THIS_PTR activity_state = BX_ACTIVITY_STATE_ACTIVE;
|
|
|
|
|
|
2012-03-25 15:54:32 +04:00
|
|
|
|
handleCpuContextChange();
|
2011-03-23 01:18:40 +03:00
|
|
|
|
|
2012-05-20 00:36:40 +04:00
|
|
|
|
#if BX_SUPPORT_MONITOR_MWAIT
|
|
|
|
|
BX_CPU_THIS_PTR monitor.reset_monitor();
|
|
|
|
|
#endif
|
|
|
|
|
|
2011-03-23 01:18:40 +03:00
|
|
|
|
BX_INSTR_TLB_CNTRL(BX_CPU_ID, BX_INSTR_CONTEXT_SWITCH, 0);
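// Control registers, segments and MSRs all changed above, so instrumentation
// is notified of a full context switch.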
|
2009-01-31 13:43:24 +03:00
|
|
|
|
}
|
|
|
|
|
|
2012-07-26 20:03:26 +04:00
|
|
|
|
void BX_CPU_C::VMexit(Bit32u reason, Bit64u qualification)
|
2009-01-31 13:43:24 +03:00
|
|
|
|
{
|
|
|
|
|
VMCS_CACHE *vm = &BX_CPU_THIS_PTR vmcs;
|
|
|
|
|
|
2010-03-16 01:58:41 +03:00
|
|
|
|
if (!BX_CPU_THIS_PTR in_vmx || !BX_CPU_THIS_PTR in_vmx_guest) {
|
2010-03-16 18:11:03 +03:00
|
|
|
|
if ((reason & 0x80000000) == 0)
|
|
|
|
|
BX_PANIC(("PANIC: VMEXIT not in VMX guest mode !"));
|
2010-03-16 01:58:41 +03:00
|
|
|
|
}
|
|
|
|
|
|
2013-10-24 01:18:19 +04:00
|
|
|
|
BX_INSTR_VMEXIT(BX_CPU_ID, reason, qualification);
|
|
|
|
|
|
2009-01-31 13:43:24 +03:00
|
|
|
|
//
|
|
|
|
|
// STEP 0: Update VMEXIT reason
|
|
|
|
|
//
|
|
|
|
|
|
|
|
|
|
VMwrite32(VMCS_32BIT_VMEXIT_REASON, reason);
|
2011-07-22 00:58:54 +04:00
|
|
|
|
VMwrite_natural(VMCS_VMEXIT_QUALIFICATION, qualification);
|
2009-01-31 13:43:24 +03:00
|
|
|
|
|
2012-07-26 20:03:26 +04:00
|
|
|
|
// clipping with 0xf not really necessary but keep it for safety
|
|
|
|
|
VMwrite32(VMCS_32BIT_VMEXIT_INSTRUCTION_LENGTH, (RIP-BX_CPU_THIS_PTR prev_rip) & 0xf);
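// The exiting instruction's length is recovered as RIP - prev_rip; VMX-reported
// instruction lengths never exceed 15 bytes.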
|
2009-01-31 13:43:24 +03:00
|
|
|
|
|
|
|
|
|
reason &= 0xffff; /* keep only basic VMEXIT reason */
|
|
|
|
|
|
2012-10-04 20:15:58 +04:00
|
|
|
|
if (reason >= VMX_VMEXIT_LAST_REASON)
|
|
|
|
|
BX_PANIC(("PANIC: broken VMEXIT reason %d", reason));
|
|
|
|
|
else
|
2014-01-24 22:58:57 +04:00
|
|
|
|
BX_DEBUG(("VMEXIT reason = %d (%s) qualification=0x" FMT_LL "x", reason, VMX_vmexit_reason_name[reason], qualification));
|
2012-10-04 20:15:58 +04:00
|
|
|
|
|
2010-03-27 14:14:19 +03:00
|
|
|
|
if (reason != VMX_VMEXIT_EXCEPTION_NMI && reason != VMX_VMEXIT_EXTERNAL_INTERRUPT) {
|
2009-01-31 13:43:24 +03:00
|
|
|
|
VMwrite32(VMCS_32BIT_VMEXIT_INTERRUPTION_INFO, 0);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (BX_CPU_THIS_PTR in_event) {
|
|
|
|
|
VMwrite32(VMCS_32BIT_IDT_VECTORING_INFO, vm->idt_vector_info | 0x80000000);
|
|
|
|
|
VMwrite32(VMCS_32BIT_IDT_VECTORING_ERR_CODE, vm->idt_vector_error_code);
|
|
|
|
|
BX_CPU_THIS_PTR in_event = 0;
|
|
|
|
|
}
|
|
|
|
|
else {
|
|
|
|
|
VMwrite32(VMCS_32BIT_IDT_VECTORING_INFO, 0);
|
|
|
|
|
}
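// IDT-vectoring info tells the hypervisor whether this VMEXIT interrupted the
// delivery of an earlier event; bit 31 marks the field as valid.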
|
|
|
|
|
|
2013-03-07 01:11:23 +04:00
|
|
|
|
BX_CPU_THIS_PTR nmi_unblocking_iret = 0;
|
|
|
|
|
|
2012-07-26 20:03:26 +04:00
|
|
|
|
// VMEXITs are FAULT-like: restore RIP/RSP to value before VMEXIT occurred
|
2012-10-26 22:43:53 +04:00
|
|
|
|
if (! IS_TRAP_LIKE_VMEXIT(reason)) {
|
|
|
|
|
RIP = BX_CPU_THIS_PTR prev_rip;
|
|
|
|
|
if (BX_CPU_THIS_PTR speculative_rsp)
|
|
|
|
|
RSP = BX_CPU_THIS_PTR prev_rsp;
|
|
|
|
|
}
|
2013-10-24 01:18:19 +04:00
|
|
|
|
BX_CPU_THIS_PTR speculative_rsp = 0;
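// RSP is rolled back only if the exiting instruction had speculatively updated it
// (speculative_rsp); the flag is then cleared for the next instruction.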
|
2012-07-26 20:03:26 +04:00
|
|
|
|
|
2009-01-31 13:43:24 +03:00
|
|
|
|
//
|
|
|
|
|
// STEP 1: Saving Guest State to VMCS
|
|
|
|
|
//
|
|
|
|
|
if (reason != VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE && reason != VMX_VMEXIT_VMENTRY_FAILURE_MSR) {
|
|
|
|
|
// clear VMENTRY interruption info field
|
|
|
|
|
VMwrite32(VMCS_32BIT_CONTROL_VMENTRY_INTERRUPTION_INFO, vm->vmentry_interr_info & ~0x80000000);
|
|
|
|
|
|
|
|
|
|
VMexitSaveGuestState();
|
|
|
|
|
|
|
|
|
|
Bit32u msr = StoreMSRs(vm->vmexit_msr_store_cnt, vm->vmexit_msr_store_addr);
|
|
|
|
|
if (msr) {
|
|
|
|
|
BX_ERROR(("VMABORT: Error when saving guest MSR number %d", msr));
|
|
|
|
|
VMabort(VMABORT_SAVING_GUEST_MSRS_FAILURE);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2010-04-07 21:12:17 +04:00
|
|
|
|
BX_CPU_THIS_PTR in_vmx_guest = 0;
|
|
|
|
|
|
2012-09-25 13:35:38 +04:00
|
|
|
|
// entering VMX root mode: clear possibly pending guest VMX events
|
2012-10-07 13:16:13 +04:00
|
|
|
|
clear_event(BX_EVENT_VMX_VTPR_UPDATE |
|
2012-10-26 22:43:53 +04:00
|
|
|
|
BX_EVENT_VMX_VEOI_UPDATE |
|
|
|
|
|
BX_EVENT_VMX_VIRTUAL_APIC_WRITE |
|
2012-10-07 13:16:13 +04:00
|
|
|
|
BX_EVENT_VMX_MONITOR_TRAP_FLAG |
|
2012-10-06 00:48:22 +04:00
|
|
|
|
BX_EVENT_VMX_INTERRUPT_WINDOW_EXITING |
|
2012-09-25 13:35:38 +04:00
|
|
|
|
BX_EVENT_VMX_PREEMPTION_TIMER_EXPIRED |
|
2013-03-06 01:12:43 +04:00
|
|
|
|
BX_EVENT_VMX_VIRTUAL_NMI |
|
2012-10-26 22:43:53 +04:00
|
|
|
|
BX_EVENT_PENDING_VMX_VIRTUAL_INTR);
|
2012-09-25 13:35:38 +04:00
|
|
|
|
|
2009-01-31 13:43:24 +03:00
|
|
|
|
//
|
|
|
|
|
// STEP 2: Load Host State
|
|
|
|
|
//
|
|
|
|
|
VMexitLoadHostState();
|
|
|
|
|
|
|
|
|
|
//
|
|
|
|
|
// STEP 3: Load Host MSR registers
|
|
|
|
|
//
|
|
|
|
|
|
|
|
|
|
Bit32u msr = LoadMSRs(vm->vmexit_msr_load_cnt, vm->vmexit_msr_load_addr);
|
|
|
|
|
if (msr) {
|
|
|
|
|
BX_ERROR(("VMABORT: Error when loading host MSR number %d", msr));
|
|
|
|
|
VMabort(VMABORT_LOADING_HOST_MSRS);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
//
|
|
|
|
|
// STEP 4: Go back to VMX host
|
|
|
|
|
//
|
|
|
|
|
|
2012-09-25 13:35:38 +04:00
|
|
|
|
mask_event(BX_EVENT_INIT); // INIT is disabled in VMX root mode
|
|
|
|
|
|
2010-04-19 15:09:35 +04:00
|
|
|
|
BX_CPU_THIS_PTR EXT = 0;
|
2012-10-05 01:30:50 +04:00
|
|
|
|
BX_CPU_THIS_PTR last_exception_type = 0;
|
2009-01-31 13:43:24 +03:00
|
|
|
|
|
2011-05-06 12:19:03 +04:00
|
|
|
|
#if BX_DEBUGGER
|
|
|
|
|
if (BX_CPU_THIS_PTR vmexit_break) {
|
|
|
|
|
BX_CPU_THIS_PTR stop_reason = STOP_VMEXIT_BREAK_POINT;
|
|
|
|
|
bx_debug_break(); // trap into debugger
|
|
|
|
|
}
|
|
|
|
|
#endif
|
|
|
|
|
|
2012-10-26 22:43:53 +04:00
|
|
|
|
if (! IS_TRAP_LIKE_VMEXIT(reason)) {
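// A fault-like VMEXIT abandons the current instruction completely and re-enters
// the main CPU loop, which starts fetching at the host RIP loaded above;
// trap-like exits fall through and return to the caller instead.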
|
2012-10-07 13:16:13 +04:00
|
|
|
|
longjmp(BX_CPU_THIS_PTR jmp_buf_env, 1); // go back to main decode loop
|
|
|
|
|
}
|
2009-01-31 13:43:24 +03:00
|
|
|
|
}
|
|
|
|
|
|
2009-05-28 12:26:17 +04:00
|
|
|
|
#endif // BX_SUPPORT_VMX
|
|
|
|
|
|
2009-01-31 13:43:24 +03:00
|
|
|
|
////////////////////////////////////////////////////////////
|
|
|
|
|
// VMX instructions
|
|
|
|
|
////////////////////////////////////////////////////////////
|
|
|
|
|
|
2011-07-07 00:01:18 +04:00
|
|
|
|
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::VMXON(bxInstruction_c *i)
|
2009-01-31 13:43:24 +03:00
|
|
|
|
{
|
|
|
|
|
#if BX_SUPPORT_VMX
|
|
|
|
|
if (! BX_CPU_THIS_PTR cr4.get_VMXE() || ! protected_mode() || BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_COMPAT)
|
2010-03-14 18:51:27 +03:00
|
|
|
|
exception(BX_UD_EXCEPTION, 0);
|
2009-01-31 13:43:24 +03:00
|
|
|
|
|
|
|
|
|
if (! BX_CPU_THIS_PTR in_vmx) {
|
|
|
|
|
if (CPL != 0 || ! BX_CPU_THIS_PTR cr0.get_NE() ||
|
2010-03-06 19:59:05 +03:00
|
|
|
|
! (BX_CPU_THIS_PTR cr0.get_PE()) || BX_GET_ENABLE_A20() == 0 ||
|
|
|
|
|
! (BX_CPU_THIS_PTR msr.ia32_feature_ctrl & BX_IA32_FEATURE_CONTROL_LOCK_BIT) ||
|
|
|
|
|
! (BX_CPU_THIS_PTR msr.ia32_feature_ctrl & BX_IA32_FEATURE_CONTROL_VMX_ENABLE_BIT))
|
|
|
|
|
{
|
|
|
|
|
BX_ERROR(("#GP: VMXON is not allowed !"));
|
2010-03-14 18:51:27 +03:00
|
|
|
|
exception(BX_GP_EXCEPTION, 0);
|
2010-03-06 19:59:05 +03:00
|
|
|
|
}
|
2009-01-31 13:43:24 +03:00
|
|
|
|
|
2015-05-17 00:06:59 +03:00
|
|
|
|
bx_address eaddr = BX_CPU_RESOLVE_ADDR(i);
|
2009-01-31 13:43:24 +03:00
|
|
|
|
Bit64u pAddr = read_virtual_qword(i->seg(), eaddr); // keep 64-bit
|
2012-12-27 01:59:16 +04:00
|
|
|
|
if (! IsValidPageAlignedPhyAddr(pAddr)) {
|
2010-03-06 19:59:05 +03:00
|
|
|
|
BX_ERROR(("VMXON: invalid or not page aligned physical address !"));
|
|
|
|
|
VMfailInvalid();
|
2011-07-07 00:01:18 +04:00
|
|
|
|
BX_NEXT_INSTR(i);
|
2009-01-31 13:43:24 +03:00
|
|
|
|
}
|
|
|
|
|
|
2013-01-21 23:55:00 +04:00
|
|
|
|
// not allowed to be shadow VMCS
|
2009-06-06 14:21:49 +04:00
|
|
|
|
Bit32u revision = VMXReadRevisionID((bx_phy_address) pAddr);
|
2015-07-06 21:46:57 +03:00
|
|
|
|
if (revision != BX_CPU_THIS_PTR vmcs_map->get_vmcs_revision_id()) {
|
|
|
|
|
BX_ERROR(("VMXON: not expected (%d != %d) VMCS revision id !", revision, BX_CPU_THIS_PTR vmcs_map->get_vmcs_revision_id()));
|
2010-03-06 19:59:05 +03:00
|
|
|
|
VMfailInvalid();
|
2011-07-07 00:01:18 +04:00
|
|
|
|
BX_NEXT_INSTR(i);
|
2009-01-31 13:43:24 +03:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
BX_CPU_THIS_PTR vmcsptr = BX_INVALID_VMCSPTR;
|
2009-05-03 17:02:14 +04:00
|
|
|
|
BX_CPU_THIS_PTR vmcshostptr = 0;
|
2009-05-28 12:26:17 +04:00
|
|
|
|
BX_CPU_THIS_PTR vmxonptr = pAddr;
|
2009-01-31 13:43:24 +03:00
|
|
|
|
BX_CPU_THIS_PTR in_vmx = 1;
|
2012-09-25 13:35:38 +04:00
|
|
|
|
mask_event(BX_EVENT_INIT); // INIT is disabled in VMX root mode
|
2009-01-31 13:43:24 +03:00
|
|
|
|
// block and disable A20M;
|
|
|
|
|
|
|
|
|
|
#if BX_SUPPORT_MONITOR_MWAIT
|
|
|
|
|
BX_CPU_THIS_PTR monitor.reset_monitor();
|
|
|
|
|
#endif
|
|
|
|
|
|
|
|
|
|
VMsucceed();
|
|
|
|
|
}
|
2011-06-28 20:04:40 +04:00
|
|
|
|
else if (BX_CPU_THIS_PTR in_vmx_guest) { // in VMX non-root operation
|
2009-01-31 13:43:24 +03:00
|
|
|
|
VMexit_Instruction(i, VMX_VMEXIT_VMXON);
|
|
|
|
|
}
|
|
|
|
|
else {
|
|
|
|
|
// in VMX root operation mode
|
2010-03-16 17:51:20 +03:00
|
|
|
|
if (CPL != 0) {
|
2013-12-03 00:06:59 +04:00
|
|
|
|
BX_ERROR(("%s: with CPL!=0 cause #GP(0)", i->getIaOpcodeNameShort()));
|
2010-03-14 18:51:27 +03:00
|
|
|
|
exception(BX_GP_EXCEPTION, 0);
|
2010-03-16 17:51:20 +03:00
|
|
|
|
}
|
2009-01-31 13:43:24 +03:00
|
|
|
|
|
|
|
|
|
VMfail(VMXERR_VMXON_IN_VMX_ROOT_OPERATION);
|
|
|
|
|
}
|
|
|
|
|
#endif
|
2011-07-07 00:01:18 +04:00
|
|
|
|
|
|
|
|
|
BX_NEXT_INSTR(i);
|
2009-01-31 13:43:24 +03:00
|
|
|
|
}
|
|
|
|
|
|
2011-07-07 00:01:18 +04:00
|
|
|
|
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::VMXOFF(bxInstruction_c *i)
|
2009-01-31 13:43:24 +03:00
|
|
|
|
{
|
|
|
|
|
#if BX_SUPPORT_VMX
|
// If CR0.PE = 0, the following instructions cause invalid-opcode exceptions
// and do not cause VM exits: INVEPT, INVVPID, LLDT, LTR, SLDT, STR, VMCLEAR,
// VMLAUNCH, VMPTRLD, VMPTRST, VMREAD, VMRESUME, VMWRITE, VMXOFF and VMXON.
2010-04-08 21:00:55 +04:00
|
|
|
|
if (! BX_CPU_THIS_PTR in_vmx || ! protected_mode() || BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_COMPAT)
|
2010-03-14 18:51:27 +03:00
|
|
|
|
exception(BX_UD_EXCEPTION, 0);
|
2009-01-31 13:43:24 +03:00
|
|
|
|
|
|
|
|
|
if (BX_CPU_THIS_PTR in_vmx_guest) {
|
2012-07-26 20:03:26 +04:00
|
|
|
|
VMexit(VMX_VMEXIT_VMXOFF, 0);
|
2009-01-31 13:43:24 +03:00
|
|
|
|
}
|
|
|
|
|
|
2010-03-16 17:51:20 +03:00
|
|
|
|
if (CPL != 0) {
|
2013-12-03 00:06:59 +04:00
|
|
|
|
BX_ERROR(("%s: with CPL!=0 cause #GP(0)", i->getIaOpcodeNameShort()));
|
2010-03-14 18:51:27 +03:00
|
|
|
|
exception(BX_GP_EXCEPTION, 0);
|
2010-03-16 17:51:20 +03:00
|
|
|
|
}
|
2009-01-31 13:43:24 +03:00
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
if dual-monitor treatment of SMIs and SMM is active
|
|
|
|
|
THEN VMfail(VMXERR_VMXOFF_WITH_CONFIGURED_SMM_MONITOR);
|
|
|
|
|
else
|
|
|
|
|
*/
|
|
|
|
|
{
|
2009-05-28 12:26:17 +04:00
|
|
|
|
BX_CPU_THIS_PTR vmxonptr = BX_INVALID_VMCSPTR;
|
2009-01-31 13:43:24 +03:00
|
|
|
|
BX_CPU_THIS_PTR in_vmx = 0; // leave VMX operation mode
|
2012-09-25 13:35:38 +04:00
|
|
|
|
unmask_event(BX_EVENT_INIT);
|
2009-01-31 13:43:24 +03:00
|
|
|
|
// unblock and enable A20M;
|
|
|
|
|
#if BX_SUPPORT_MONITOR_MWAIT
|
|
|
|
|
BX_CPU_THIS_PTR monitor.reset_monitor();
|
|
|
|
|
#endif
|
|
|
|
|
VMsucceed();
|
|
|
|
|
}
|
|
|
|
|
#endif
|
2011-07-07 00:01:18 +04:00
|
|
|
|
|
|
|
|
|
BX_NEXT_INSTR(i);
|
2009-01-31 13:43:24 +03:00
|
|
|
|
}
|
|
|
|
|
|
2011-07-07 00:01:18 +04:00
|
|
|
|
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::VMCALL(bxInstruction_c *i)
|
2009-01-31 13:43:24 +03:00
|
|
|
|
{
|
|
|
|
|
#if BX_SUPPORT_VMX
|
|
|
|
|
if (! BX_CPU_THIS_PTR in_vmx)
|
2010-03-14 18:51:27 +03:00
|
|
|
|
exception(BX_UD_EXCEPTION, 0);
|
2009-01-31 13:43:24 +03:00
|
|
|
|
|
|
|
|
|
if (BX_CPU_THIS_PTR in_vmx_guest) {
|
2012-07-26 20:03:26 +04:00
|
|
|
|
VMexit(VMX_VMEXIT_VMCALL, 0);
|
2009-01-31 13:43:24 +03:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (BX_CPU_THIS_PTR get_VM() || BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_COMPAT)
|
2010-03-14 18:51:27 +03:00
|
|
|
|
exception(BX_UD_EXCEPTION, 0);
|
2009-01-31 13:43:24 +03:00
|
|
|
|
|
2010-03-16 17:51:20 +03:00
|
|
|
|
if (CPL != 0) {
|
2013-12-03 00:06:59 +04:00
|
|
|
|
BX_ERROR(("%s: with CPL!=0 cause #GP(0)", i->getIaOpcodeNameShort()));
|
2010-03-14 18:51:27 +03:00
|
|
|
|
exception(BX_GP_EXCEPTION, 0);
|
2010-03-16 17:51:20 +03:00
|
|
|
|
}
|
2009-01-31 13:43:24 +03:00
|
|
|
|
|
|
|
|
|
if (BX_CPU_THIS_PTR in_smm /*||
|
|
|
|
|
(the logical processor does not support the dual-monitor treatment of SMIs and SMM) ||
|
|
|
|
|
(the valid bit in the IA32_SMM_MONITOR_CTL MSR is clear)*/)
|
|
|
|
|
{
|
|
|
|
|
VMfail(VMXERR_VMCALL_IN_VMX_ROOT_OPERATION);
|
2011-07-07 00:01:18 +04:00
|
|
|
|
BX_NEXT_TRACE(i);
|
2009-01-31 13:43:24 +03:00
|
|
|
|
}
|
|
|
|
|
/*
|
|
|
|
|
if dual-monitor treatment of SMIs and BX_CPU_THIS_PTR in_smm
|
|
|
|
|
THEN perform an SMM VMexit (see Section 24.16.2
|
|
|
|
|
of the IntelR 64 and IA-32 Architectures Software Developer's Manual, Volume 3B);
|
|
|
|
|
*/
|
2013-01-21 23:55:00 +04:00
|
|
|
|
if (BX_CPU_THIS_PTR vmcsptr == BX_INVALID_VMCSPTR) {
|
2009-05-28 12:26:17 +04:00
|
|
|
|
BX_ERROR(("VMFAIL: VMCALL with invalid VMCS ptr"));
|
2009-01-31 13:43:24 +03:00
|
|
|
|
VMfailInvalid();
|
2011-07-07 00:01:18 +04:00
|
|
|
|
BX_NEXT_TRACE(i);
|
2009-01-31 13:43:24 +03:00
|
|
|
|
}
|
|
|
|
|
|
2015-07-06 21:46:57 +03:00
|
|
|
|
Bit32u launch_state = VMread32(VMCS_LAUNCH_STATE_FIELD_ENCODING);
|
2009-01-31 13:43:24 +03:00
|
|
|
|
if (launch_state != VMCS_STATE_CLEAR) {
|
2009-05-28 12:26:17 +04:00
|
|
|
|
BX_ERROR(("VMFAIL: VMCALL with launched VMCS"));
|
2009-01-31 13:43:24 +03:00
|
|
|
|
VMfail(VMXERR_VMCALL_NON_CLEAR_VMCS);
|
2011-07-07 00:01:18 +04:00
|
|
|
|
BX_NEXT_TRACE(i);
|
2009-01-31 13:43:24 +03:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
BX_PANIC(("VMCALL: not implemented yet"));
|
|
|
|
|
/*
|
2012-01-12 00:21:29 +04:00
|
|
|
|
if VM-exit control fields are not valid (see Section 24.16.6.1 of the IntelR 64 and IA-32 Architectures Software Developer's Manual, Volume 3B)
|
|
|
|
|
THEN VMfail(VMXERR_VMCALL_INVALID_VMEXIT_FIELD);
|
|
|
|
|
else
|
|
|
|
|
enter SMM;
|
|
|
|
|
read revision identifier in MSEG;
|
|
|
|
|
if revision identifier does not match that supported by processor
|
|
|
|
|
THEN
|
|
|
|
|
leave SMM;
|
|
|
|
|
VMfailValid(VMXERR_VMCALL_INVALID_MSEG_REVISION_ID);
|
|
|
|
|
else
|
|
|
|
|
read SMM-monitor features field in MSEG (see Section 24.16.6.2,
|
|
|
|
|
in the IntelR 64 and IA-32 Architectures Software Developer's Manual, Volume 3B);
|
|
|
|
|
if features field is invalid
|
|
|
|
|
THEN
|
|
|
|
|
leave SMM;
|
|
|
|
|
VMfailValid(VMXERR_VMCALL_WITH_INVALID_SMM_MONITOR_FEATURES);
|
|
|
|
|
else activate dual-monitor treatment of SMIs and SMM (see Section 24.16.6
|
|
|
|
|
in the IntelR 64 and IA-32 Architectures Software Developer's Manual, Volume 3B);
|
|
|
|
|
FI;
|
|
|
|
|
FI;
|
|
|
|
|
FI;
|
2009-01-31 13:43:24 +03:00
|
|
|
|
*/
|
|
|
|
|
#endif
|
2011-07-07 00:01:18 +04:00
|
|
|
|
|
|
|
|
|
BX_NEXT_TRACE(i);
|
2009-01-31 13:43:24 +03:00
|
|
|
|
}
|
|
|
|
|
|
2011-07-07 00:01:18 +04:00
|
|
|
|
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::VMLAUNCH(bxInstruction_c *i)
|
2009-01-31 13:43:24 +03:00
|
|
|
|
{
|
|
|
|
|
#if BX_SUPPORT_VMX
|
2010-04-08 21:00:55 +04:00
|
|
|
|
if (! BX_CPU_THIS_PTR in_vmx || ! protected_mode() || BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_COMPAT)
|
2010-03-14 18:51:27 +03:00
|
|
|
|
exception(BX_UD_EXCEPTION, 0);
|
2009-01-31 13:43:24 +03:00
|
|
|
|
|
|
|
|
|
unsigned vmlaunch = 0;
|
2011-12-19 01:04:30 +04:00
|
|
|
|
if ((i->getIaOpcode() == BX_IA_VMLAUNCH)) {
|
2012-01-05 23:42:58 +04:00
|
|
|
|
BX_DEBUG(("VMLAUNCH VMCS ptr: 0x" FMT_ADDRX64, BX_CPU_THIS_PTR vmcsptr));
|
2009-01-31 13:43:24 +03:00
|
|
|
|
vmlaunch = 1;
|
|
|
|
|
}
|
2011-12-19 01:04:30 +04:00
|
|
|
|
else {
|
2012-01-05 23:42:58 +04:00
|
|
|
|
BX_DEBUG(("VMRESUME VMCS ptr: 0x" FMT_ADDRX64, BX_CPU_THIS_PTR vmcsptr));
|
2011-12-19 01:04:30 +04:00
|
|
|
|
}
|
2009-01-31 13:43:24 +03:00
|
|
|
|
|
|
|
|
|
if (BX_CPU_THIS_PTR in_vmx_guest) {
|
2012-07-26 20:03:26 +04:00
|
|
|
|
VMexit(vmlaunch ? VMX_VMEXIT_VMLAUNCH : VMX_VMEXIT_VMRESUME, 0);
|
2009-01-31 13:43:24 +03:00
|
|
|
|
}
|
|
|
|
|
|
2010-03-16 17:51:20 +03:00
|
|
|
|
if (CPL != 0) {
|
2013-12-03 00:06:59 +04:00
|
|
|
|
BX_ERROR(("%s: with CPL!=0 cause #GP(0)", i->getIaOpcodeNameShort()));
|
2010-03-14 18:51:27 +03:00
|
|
|
|
exception(BX_GP_EXCEPTION, 0);
|
2010-03-16 17:51:20 +03:00
|
|
|
|
}
|
2009-01-31 13:43:24 +03:00
|
|
|
|
|
2013-01-21 23:55:00 +04:00
|
|
|
|
if (BX_CPU_THIS_PTR vmcsptr == BX_INVALID_VMCSPTR) {
|
2009-05-28 12:26:17 +04:00
|
|
|
|
BX_ERROR(("VMFAIL: VMLAUNCH with invalid VMCS ptr !"));
|
2009-01-31 13:43:24 +03:00
|
|
|
|
VMfailInvalid();
|
2011-07-07 00:01:18 +04:00
|
|
|
|
BX_NEXT_TRACE(i);
|
2009-01-31 13:43:24 +03:00
|
|
|
|
}
|
|
|
|
|
|
2011-12-21 10:17:45 +04:00
|
|
|
|
if (interrupts_inhibited(BX_INHIBIT_INTERRUPTS_BY_MOVSS)) {
|
2009-01-31 13:43:24 +03:00
|
|
|
|
BX_ERROR(("VMFAIL: VMLAUNCH with interrupts blocked by MOV_SS !"));
|
|
|
|
|
VMfail(VMXERR_VMENTRY_MOV_SS_BLOCKING);
|
2011-07-07 00:01:18 +04:00
|
|
|
|
BX_NEXT_TRACE(i);
|
2009-01-31 13:43:24 +03:00
|
|
|
|
}
|
|
|
|
|
|
2015-07-06 21:46:57 +03:00
|
|
|
|
Bit32u launch_state = VMread32(VMCS_LAUNCH_STATE_FIELD_ENCODING);
|
2009-01-31 13:43:24 +03:00
|
|
|
|
|
|
|
|
|
if (vmlaunch) {
|
|
|
|
|
if (launch_state != VMCS_STATE_CLEAR) {
|
|
|
|
|
BX_ERROR(("VMFAIL: VMLAUNCH with non-clear VMCS!"));
|
|
|
|
|
VMfail(VMXERR_VMLAUNCH_NON_CLEAR_VMCS);
|
2011-07-07 00:01:18 +04:00
|
|
|
|
BX_NEXT_TRACE(i);
|
2009-01-31 13:43:24 +03:00
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
else {
|
|
|
|
|
if (launch_state != VMCS_STATE_LAUNCHED) {
|
|
|
|
|
BX_ERROR(("VMFAIL: VMRESUME with non-launched VMCS!"));
|
|
|
|
|
VMfail(VMXERR_VMRESUME_NON_LAUNCHED_VMCS);
|
2011-07-07 00:01:18 +04:00
|
|
|
|
BX_NEXT_TRACE(i);
|
2009-01-31 13:43:24 +03:00
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
///////////////////////////////////////////////////////
|
|
|
|
|
// STEP 1: Load and Check VM-Execution Control Fields
|
|
|
|
|
// STEP 2: Load and Check VM-Exit Control Fields
|
|
|
|
|
// STEP 3: Load and Check VM-Entry Control Fields
|
|
|
|
|
///////////////////////////////////////////////////////
|
|
|
|
|
|
|
|
|
|
VMX_error_code error = VMenterLoadCheckVmControls();
|
|
|
|
|
if (error != VMXERR_NO_ERROR) {
|
|
|
|
|
VMfail(error);
|
2011-07-07 00:01:18 +04:00
|
|
|
|
BX_NEXT_TRACE(i);
|
2009-01-31 13:43:24 +03:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
///////////////////////////////////////////////////////
|
|
|
|
|
// STEP 4: Load and Check Host State
|
|
|
|
|
///////////////////////////////////////////////////////
|
|
|
|
|
|
|
|
|
|
error = VMenterLoadCheckHostState();
|
|
|
|
|
if (error != VMXERR_NO_ERROR) {
|
|
|
|
|
VMfail(error);
|
2011-07-07 00:01:18 +04:00
|
|
|
|
BX_NEXT_TRACE(i);
|
2009-01-31 13:43:24 +03:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
///////////////////////////////////////////////////////
|
|
|
|
|
// STEP 5: Load and Check Guest State
|
|
|
|
|
///////////////////////////////////////////////////////
|
|
|
|
|
|
|
|
|
|
Bit64u qualification = VMENTER_ERR_NO_ERROR;
|
|
|
|
|
Bit32u state_load_error = VMenterLoadCheckGuestState(&qualification);
|
|
|
|
|
if (state_load_error) {
|
|
|
|
|
BX_ERROR(("VMEXIT: Guest State Checks Failed"));
|
2012-07-26 20:03:26 +04:00
|
|
|
|
VMexit(VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE | (1 << 31), qualification);
|
2009-01-31 13:43:24 +03:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
Bit32u msr = LoadMSRs(BX_CPU_THIS_PTR vmcs.vmentry_msr_load_cnt, BX_CPU_THIS_PTR vmcs.vmentry_msr_load_addr);
|
|
|
|
|
if (msr) {
|
|
|
|
|
BX_ERROR(("VMEXIT: Error when loading guest MSR 0x%08x", msr));
|
2012-07-26 20:03:26 +04:00
|
|
|
|
VMexit(VMX_VMEXIT_VMENTRY_FAILURE_MSR | (1 << 31), msr);
|
2009-01-31 13:43:24 +03:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
///////////////////////////////////////////////////////
|
|
|
|
|
// STEP 6: Update VMCS 'launched' state
|
|
|
|
|
///////////////////////////////////////////////////////
|
|
|
|
|
|
2015-07-06 21:46:57 +03:00
|
|
|
|
if (vmlaunch) VMwrite32(VMCS_LAUNCH_STATE_FIELD_ENCODING, VMCS_STATE_LAUNCHED);
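// Only a successful VMLAUNCH flips the launch state to "launched"; VMRESUME leaves it untouched.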
|
2009-01-31 13:43:24 +03:00
|
|
|
|
|
|
|
|
|
/*
|
2012-01-05 01:36:39 +04:00
|
|
|
|
Check settings of VMX controls and host-state area;
|
|
|
|
|
if invalid settings
|
|
|
|
|
THEN VMfailValid(VM entry with invalid VMX-control field(s)) or
|
|
|
|
|
VMfailValid(VM entry with invalid host-state field(s)) or
|
|
|
|
|
VMfailValid(VM entry with invalid executive-VMCS pointer)) or
|
|
|
|
|
VMfailValid(VM entry with non-launched executive VMCS) or
|
|
|
|
|
VMfailValid(VM entry with executive-VMCS pointer not VMXON pointer)
|
|
|
|
|
VMfailValid(VM entry with invalid VM-execution control fields in executive VMCS)
|
|
|
|
|
(as appropriate);
|
|
|
|
|
else
|
|
|
|
|
Attempt to load guest state and PDPTRs as appropriate;
|
|
|
|
|
clear address-range monitoring;
|
|
|
|
|
if failure in checking guest state or PDPTRs
|
|
|
|
|
THEN VM entry fails (see Section 22.7, in the IntelR 64 and IA-32 Architectures Software Developer's Manual, Volume 3B);
|
|
|
|
|
else
|
|
|
|
|
Attempt to load MSRs from VM-entry MSR-load area;
|
|
|
|
|
if failure
|
|
|
|
|
THEN VM entry fails (see Section 22.7, in the IntelR 64 and IA-32 Architectures Software Developer's Manual, Volume 3B);
|
|
|
|
|
else {
|
|
|
|
|
if VMLAUNCH
|
|
|
|
|
THEN launch state of VMCS <== "launched";
|
|
|
|
|
if in SMM and "entry to SMM" VM-entry control is 0
|
|
|
|
|
THEN
|
|
|
|
|
if "deactivate dual-monitor treatment" VM-entry control is 0
|
|
|
|
|
THEN SMM-transfer VMCS pointer <== current-VMCS pointer;
|
|
|
|
|
FI;
|
|
|
|
|
if executive-VMCS pointer is VMX pointer
|
|
|
|
|
THEN current-VMCS pointer <== VMCS-link pointer;
|
|
|
|
|
else current-VMCS pointer <== executive-VMCS pointer;
|
2009-01-31 13:43:24 +03:00
|
|
|
|
FI;
|
2012-01-05 01:36:39 +04:00
|
|
|
|
leave SMM;
|
|
|
|
|
FI;
|
|
|
|
|
VMsucceed();
|
|
|
|
|
}
|
|
|
|
|
FI;
|
|
|
|
|
FI;
|
2009-01-31 13:43:24 +03:00
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
|
BX_CPU_THIS_PTR in_vmx_guest = 1;
|
2012-09-25 13:35:38 +04:00
|
|
|
|
|
|
|
|
|
unmask_event(BX_EVENT_INIT);
|
2009-01-31 13:43:24 +03:00
|
|
|
|
|
2011-12-26 02:09:31 +04:00
|
|
|
|
if (VMEXIT(VMX_VM_EXEC_CTRL2_TSC_OFFSET))
|
|
|
|
|
BX_CPU_THIS_PTR tsc_offset = VMread64(VMCS_64BIT_CONTROL_TSC_OFFSET);
|
|
|
|
|
else
|
|
|
|
|
BX_CPU_THIS_PTR tsc_offset = 0;
|
|
|
|
|
|
2011-07-03 19:59:48 +04:00
|
|
|
|
#if BX_SUPPORT_VMX >= 2
|
|
|
|
|
if (PIN_VMEXIT(VMX_VM_EXEC_CTRL1_VMX_PREEMPTION_TIMER_VMEXIT)) {
|
|
|
|
|
Bit32u timer_value = VMread32(VMCS_32BIT_GUEST_PREEMPTION_TIMER_VALUE);
|
|
|
|
|
if (timer_value == 0) {
|
2012-09-25 13:35:38 +04:00
|
|
|
|
signal_event(BX_EVENT_VMX_PREEMPTION_TIMER_EXPIRED);
|
2011-07-03 19:59:48 +04:00
|
|
|
|
}
|
|
|
|
|
else {
|
|
|
|
|
// activate VMX preemption timer
|
2011-12-19 20:06:53 +04:00
|
|
|
|
BX_DEBUG(("VMX preemption timer active"));
|
2011-07-03 19:59:48 +04:00
|
|
|
|
BX_CPU_THIS_PTR lapic.set_vmx_preemption_timer(timer_value);
|
|
|
|
|
}
|
|
|
|
|
}
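// A preemption timer value of zero is treated as already expired; otherwise the
// local APIC model counts the timer down concurrently with guest execution.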
|
|
|
|
|
#endif
|
|
|
|
|
|
2009-01-31 13:43:24 +03:00
|
|
|
|
///////////////////////////////////////////////////////
|
|
|
|
|
// STEP 7: Inject events to the guest
|
|
|
|
|
///////////////////////////////////////////////////////
|
|
|
|
|
|
|
|
|
|
VMenterInjectEvents();
|
2012-10-26 22:43:53 +04:00
|
|
|
|
|
2012-10-28 20:34:25 +04:00
|
|
|
|
#if BX_SUPPORT_X86_64
|
2012-10-26 22:43:53 +04:00
|
|
|
|
// - When virtual-interrupt-delivery is set this will cause PPR virtualization
|
|
|
|
|
// followed by Virtual Interrupt Evaluation
|
|
|
|
|
// - When use TPR shadow together with Virtualize APIC Access are set this would
|
|
|
|
|
// cause TPR threshold check
|
|
|
|
|
// - When Virtualize APIC Access is disabled the code would pass through TPR
|
|
|
|
|
// threshold check but no VMExit would occur (otherwise VMEntry should fail
|
|
|
|
|
// consistency checks before).
|
|
|
|
|
if (VMEXIT(VMX_VM_EXEC_CTRL2_TPR_SHADOW)) {
|
|
|
|
|
VMX_TPR_Virtualization();
|
|
|
|
|
}
|
2012-10-28 20:34:25 +04:00
|
|
|
|
#endif
|
|
|
|
|
|
|
|
|
|
#endif // BX_SUPPORT_VMX
|
2011-07-07 00:01:18 +04:00
|
|
|
|
|
|
|
|
|
BX_NEXT_TRACE(i);
|
2009-01-31 13:43:24 +03:00
|
|
|
|
}
|
|
|
|
|
|
2011-07-07 00:01:18 +04:00
|
|
|
|
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::VMPTRLD(bxInstruction_c *i)
|
2009-01-31 13:43:24 +03:00
|
|
|
|
{
|
|
|
|
|
#if BX_SUPPORT_VMX
|
2010-04-08 21:00:55 +04:00
|
|
|
|
if (! BX_CPU_THIS_PTR in_vmx || ! protected_mode() || BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_COMPAT)
|
2010-03-14 18:51:27 +03:00
|
|
|
|
exception(BX_UD_EXCEPTION, 0);
|
2009-01-31 13:43:24 +03:00
|
|
|
|
|
|
|
|
|
if (BX_CPU_THIS_PTR in_vmx_guest) {
|
|
|
|
|
VMexit_Instruction(i, VMX_VMEXIT_VMPTRLD);
|
|
|
|
|
}
|
|
|
|
|
|
2010-03-16 17:51:20 +03:00
|
|
|
|
if (CPL != 0) {
|
2011-09-17 00:06:23 +04:00
|
|
|
|
BX_ERROR(("VMPTRLD with CPL!=0 willcause #GP(0)"));
|
2010-03-14 18:51:27 +03:00
|
|
|
|
exception(BX_GP_EXCEPTION, 0);
|
2010-03-16 17:51:20 +03:00
|
|
|
|
}
|
2009-01-31 13:43:24 +03:00
|
|
|
|
|
2015-05-17 00:06:59 +03:00
|
|
|
|
bx_address eaddr = BX_CPU_RESOLVE_ADDR(i);
|
2009-01-31 13:43:24 +03:00
|
|
|
|
Bit64u pAddr = read_virtual_qword(i->seg(), eaddr); // keep 64-bit
|
2012-12-27 01:59:16 +04:00
|
|
|
|
if (! IsValidPageAlignedPhyAddr(pAddr)) {
|
2009-05-30 19:09:38 +04:00
|
|
|
|
BX_ERROR(("VMFAIL: invalid or not page aligned physical address !"));
|
2009-01-31 13:43:24 +03:00
|
|
|
|
VMfail(VMXERR_VMPTRLD_INVALID_PHYSICAL_ADDRESS);
|
2011-07-07 00:01:18 +04:00
|
|
|
|
BX_NEXT_INSTR(i);
|
2009-01-31 13:43:24 +03:00
|
|
|
|
}
|
|
|
|
|
|
2009-05-28 12:26:17 +04:00
|
|
|
|
if (pAddr == BX_CPU_THIS_PTR vmxonptr) {
|
|
|
|
|
BX_ERROR(("VMFAIL: VMPTRLD with VMXON ptr !"));
|
2009-01-31 13:43:24 +03:00
|
|
|
|
VMfail(VMXERR_VMPTRLD_WITH_VMXON_PTR);
|
|
|
|
|
}
|
|
|
|
|
else {
|
2009-06-06 14:21:49 +04:00
|
|
|
|
Bit32u revision = VMXReadRevisionID((bx_phy_address) pAddr);
|
2013-01-21 23:55:00 +04:00
|
|
|
|
|
|
|
|
|
if (BX_SUPPORT_VMX_EXTENSION(BX_VMX_VMCS_SHADOWING))
|
|
|
|
|
revision &= ~BX_VMCS_SHADOW_BIT_MASK; // allowed to be shadow VMCS
|
|
|
|
|
|
2015-07-06 21:46:57 +03:00
|
|
|
|
if (revision != BX_CPU_THIS_PTR vmcs_map->get_vmcs_revision_id()) {
|
|
|
|
|
BX_ERROR(("VMPTRLD: not expected (%d != %d) VMCS revision id !", revision, BX_CPU_THIS_PTR vmcs_map->get_vmcs_revision_id()));
|
2009-01-31 13:43:24 +03:00
|
|
|
|
VMfail(VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
|
|
|
|
|
}
|
2009-05-30 19:09:38 +04:00
|
|
|
|
else {
|
|
|
|
|
set_VMCSPTR(pAddr);
|
|
|
|
|
VMsucceed();
|
|
|
|
|
}
|
2009-01-31 13:43:24 +03:00
|
|
|
|
}
|
|
|
|
|
#endif
|
2011-07-07 00:01:18 +04:00
|
|
|
|
|
|
|
|
|
BX_NEXT_INSTR(i);
|
2009-01-31 13:43:24 +03:00
|
|
|
|
}
|
|
|
|
|
|
2011-07-07 00:01:18 +04:00
|
|
|
|
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::VMPTRST(bxInstruction_c *i)
|
2009-01-31 13:43:24 +03:00
|
|
|
|
{
|
|
|
|
|
#if BX_SUPPORT_VMX
|
2010-04-08 21:00:55 +04:00
|
|
|
|
if (! BX_CPU_THIS_PTR in_vmx || ! protected_mode() || BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_COMPAT)
|
2010-03-14 18:51:27 +03:00
|
|
|
|
exception(BX_UD_EXCEPTION, 0);
|
2009-01-31 13:43:24 +03:00
|
|
|
|
|
|
|
|
|
if (BX_CPU_THIS_PTR in_vmx_guest) {
|
|
|
|
|
VMexit_Instruction(i, VMX_VMEXIT_VMPTRST);
|
|
|
|
|
}
|
|
|
|
|
|
2010-03-16 17:51:20 +03:00
|
|
|
|
if (CPL != 0) {
|
2013-12-03 00:06:59 +04:00
|
|
|
|
BX_ERROR(("%s: with CPL!=0 cause #GP(0)", i->getIaOpcodeNameShort()));
|
2010-03-14 18:51:27 +03:00
|
|
|
|
exception(BX_GP_EXCEPTION, 0);
|
2010-03-16 17:51:20 +03:00
|
|
|
|
}
|
2009-01-31 13:43:24 +03:00
|
|
|
|
|
2015-05-17 00:06:59 +03:00
|
|
|
|
bx_address eaddr = BX_CPU_RESOLVE_ADDR(i);
|
2009-01-31 13:43:24 +03:00
|
|
|
|
write_virtual_qword(i->seg(), eaddr, BX_CPU_THIS_PTR vmcsptr);
|
|
|
|
|
VMsucceed();
|
|
|
|
|
#endif
|
2011-07-07 00:01:18 +04:00
|
|
|
|
|
|
|
|
|
BX_NEXT_INSTR(i);
|
2009-01-31 13:43:24 +03:00
|
|
|
|
}
|
|
|
|
|
|
2013-01-22 23:06:20 +04:00
|
|
|
|
#if BX_SUPPORT_VMX
|
|
|
|
|
|
2013-01-21 23:55:00 +04:00
|
|
|
|
Bit64u BX_CPP_AttrRegparmN(1) BX_CPU_C::vmread(unsigned encoding)
|
2009-01-31 13:43:24 +03:00
|
|
|
|
{
|
2013-01-21 23:55:00 +04:00
|
|
|
|
unsigned width = VMCS_FIELD_WIDTH(encoding);
|
|
|
|
|
Bit64u field_64;
|
2009-01-31 13:43:24 +03:00
|
|
|
|
|
2013-01-21 23:55:00 +04:00
|
|
|
|
if(width == VMCS_FIELD_WIDTH_16BIT) {
|
|
|
|
|
field_64 = VMread16(encoding);
|
2009-01-31 13:43:24 +03:00
|
|
|
|
}
|
2013-01-21 23:55:00 +04:00
|
|
|
|
else if(width == VMCS_FIELD_WIDTH_32BIT) {
|
|
|
|
|
// the real hardware keeps segment access rights in rotated form, convert on read
|
|
|
|
|
if (encoding >= VMCS_32BIT_GUEST_ES_ACCESS_RIGHTS && encoding <= VMCS_32BIT_GUEST_TR_ACCESS_RIGHTS)
|
|
|
|
|
field_64 = vmx_from_ar_byte_rd(VMread32(encoding));
|
|
|
|
|
else
|
|
|
|
|
field_64 = VMread32(encoding);
|
2010-03-16 17:51:20 +03:00
|
|
|
|
}
|
2013-01-21 23:55:00 +04:00
|
|
|
|
else if(width == VMCS_FIELD_WIDTH_64BIT) {
|
|
|
|
|
if (IS_VMCS_FIELD_HI(encoding))
|
|
|
|
|
field_64 = VMread32(encoding);
|
|
|
|
|
else
|
|
|
|
|
field_64 = VMread64(encoding);
|
2009-01-31 13:43:24 +03:00
|
|
|
|
}
|
2013-01-21 23:55:00 +04:00
|
|
|
|
else {
|
|
|
|
|
field_64 = VMread_natural(encoding);
|
2009-01-31 13:43:24 +03:00
|
|
|
|
}
|
|
|
|
|
|
2013-01-21 23:55:00 +04:00
|
|
|
|
return field_64;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
void BX_CPP_AttrRegparmN(2) BX_CPU_C::vmwrite(unsigned encoding, Bit64u val_64)
|
|
|
|
|
{
|
|
|
|
|
unsigned width = VMCS_FIELD_WIDTH(encoding);
|
|
|
|
|
Bit32u val_32 = GET32L(val_64);
|
|
|
|
|
|
|
|
|
|
if(width == VMCS_FIELD_WIDTH_16BIT) {
|
|
|
|
|
VMwrite16(encoding, val_32 & 0xffff);
|
|
|
|
|
}
|
|
|
|
|
else if(width == VMCS_FIELD_WIDTH_32BIT) {
|
|
|
|
|
// the real hardware keeps segment access rights in rotated form, convert on write
|
|
|
|
|
if (encoding >= VMCS_32BIT_GUEST_ES_ACCESS_RIGHTS && encoding <= VMCS_32BIT_GUEST_TR_ACCESS_RIGHTS)
|
2013-04-09 19:43:15 +04:00
|
|
|
|
VMwrite32(encoding, vmx_from_ar_byte_wr(val_32));
|
2013-01-21 23:55:00 +04:00
|
|
|
|
else
|
|
|
|
|
VMwrite32(encoding, val_32);
|
|
|
|
|
}
|
|
|
|
|
else if(width == VMCS_FIELD_WIDTH_64BIT) {
|
|
|
|
|
if (IS_VMCS_FIELD_HI(encoding))
|
|
|
|
|
VMwrite32(encoding, val_32);
|
|
|
|
|
else
|
|
|
|
|
VMwrite64(encoding, val_64);
|
2010-11-11 19:25:45 +03:00
|
|
|
|
}
|
2013-01-21 23:55:00 +04:00
|
|
|
|
else {
|
|
|
|
|
VMwrite_natural(encoding, (bx_address) val_64);
|
|
|
|
|
}
|
|
|
|
|
}
|
2010-11-11 19:25:45 +03:00
|
|
|
|
|
2013-01-21 23:55:00 +04:00
|
|
|
|
#if BX_SUPPORT_VMX >= 2
|
|
|
|
|
|
|
|
|
|
Bit64u BX_CPP_AttrRegparmN(1) BX_CPU_C::vmread_shadow(unsigned encoding)
|
|
|
|
|
{
|
2010-11-12 23:26:01 +03:00
|
|
|
|
unsigned width = VMCS_FIELD_WIDTH(encoding);
|
2010-11-12 00:41:03 +03:00
|
|
|
|
Bit64u field_64;
|
|
|
|
|
|
2010-11-12 23:26:01 +03:00
|
|
|
|
if(width == VMCS_FIELD_WIDTH_16BIT) {
|
2013-01-21 23:55:00 +04:00
|
|
|
|
field_64 = VMread16_Shadow(encoding);
|
2010-11-12 23:26:01 +03:00
|
|
|
|
}
|
|
|
|
|
else if(width == VMCS_FIELD_WIDTH_32BIT) {
|
|
|
|
|
// the real hardware keeps segment access rights in rotated form, convert on read
|
|
|
|
|
if (encoding >= VMCS_32BIT_GUEST_ES_ACCESS_RIGHTS && encoding <= VMCS_32BIT_GUEST_TR_ACCESS_RIGHTS)
|
2013-01-21 23:55:00 +04:00
|
|
|
|
field_64 = vmx_from_ar_byte_rd(VMread32_Shadow(encoding));
|
2010-11-12 23:26:01 +03:00
|
|
|
|
else
|
2013-01-21 23:55:00 +04:00
|
|
|
|
field_64 = VMread32_Shadow(encoding);
|
2010-11-12 23:26:01 +03:00
|
|
|
|
}
|
2011-07-22 00:58:54 +04:00
|
|
|
|
else if(width == VMCS_FIELD_WIDTH_64BIT) {
|
2010-11-12 23:26:01 +03:00
|
|
|
|
if (IS_VMCS_FIELD_HI(encoding))
|
2013-01-21 23:55:00 +04:00
|
|
|
|
field_64 = VMread32_Shadow(encoding);
|
2010-11-12 23:26:01 +03:00
|
|
|
|
else
|
2013-01-21 23:55:00 +04:00
|
|
|
|
field_64 = VMread64_Shadow(encoding);
|
2010-11-12 23:26:01 +03:00
|
|
|
|
}
|
2011-07-22 00:58:54 +04:00
|
|
|
|
else {
|
2013-01-21 23:55:00 +04:00
|
|
|
|
field_64 = VMread64_Shadow(encoding);
|
2011-07-22 00:58:54 +04:00
|
|
|
|
}
|
2009-01-31 13:43:24 +03:00
|
|
|
|
|
2013-01-21 23:55:00 +04:00
|
|
|
|
return field_64;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
void BX_CPP_AttrRegparmN(2) BX_CPU_C::vmwrite_shadow(unsigned encoding, Bit64u val_64)
|
|
|
|
|
{
|
|
|
|
|
unsigned width = VMCS_FIELD_WIDTH(encoding);
|
|
|
|
|
Bit32u val_32 = GET32L(val_64);
|
|
|
|
|
|
|
|
|
|
if(width == VMCS_FIELD_WIDTH_16BIT) {
|
|
|
|
|
VMwrite16_Shadow(encoding, val_32 & 0xffff);
|
2009-01-31 13:43:24 +03:00
|
|
|
|
}
|
2013-01-21 23:55:00 +04:00
|
|
|
|
else if(width == VMCS_FIELD_WIDTH_32BIT) {
|
|
|
|
|
// the real hardware keeps segment access rights in rotated form, convert on write
|
|
|
|
|
if (encoding >= VMCS_32BIT_GUEST_ES_ACCESS_RIGHTS && encoding <= VMCS_32BIT_GUEST_TR_ACCESS_RIGHTS)
|
2013-04-09 19:43:15 +04:00
|
|
|
|
VMwrite32_Shadow(encoding, vmx_from_ar_byte_wr(val_32));
|
2013-01-21 23:55:00 +04:00
|
|
|
|
else
|
|
|
|
|
VMwrite32_Shadow(encoding, val_32);
|
|
|
|
|
}
|
|
|
|
|
else if(width == VMCS_FIELD_WIDTH_64BIT) {
|
|
|
|
|
if (IS_VMCS_FIELD_HI(encoding))
|
|
|
|
|
VMwrite32_Shadow(encoding, val_32);
|
|
|
|
|
else
|
|
|
|
|
VMwrite64_Shadow(encoding, val_64);
|
|
|
|
|
}
|
|
|
|
|
else {
|
|
|
|
|
VMwrite64_Shadow(encoding, val_64);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2013-01-22 23:06:20 +04:00
|
|
|
|
#endif // BX_SUPPORT_VMX >= 2
|
|
|
|
|
|
2009-01-31 13:43:24 +03:00
|
|
|
|
#endif
|
|
|
|
|
|
2013-01-21 23:55:00 +04:00
|
|
|
|
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::VMREAD_EdGd(bxInstruction_c *i)
|
|
|
|
|
{
|
|
|
|
|
#if BX_SUPPORT_VMX
|
|
|
|
|
if (! BX_CPU_THIS_PTR in_vmx || ! protected_mode() || BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_COMPAT)
|
|
|
|
|
exception(BX_UD_EXCEPTION, 0);
|
|
|
|
|
|
|
|
|
|
bx_phy_address vmcs_pointer = BX_CPU_THIS_PTR vmcsptr;
|
|
|
|
|
|
|
|
|
|
if (BX_CPU_THIS_PTR in_vmx_guest) {
|
2013-01-23 23:04:53 +04:00
|
|
|
|
#if BX_SUPPORT_VMX >= 2
|
2013-01-21 23:55:00 +04:00
|
|
|
|
if (Vmexit_Vmread(i))
|
2013-01-23 23:04:53 +04:00
|
|
|
|
#endif
|
2013-01-21 23:55:00 +04:00
|
|
|
|
VMexit_Instruction(i, VMX_VMEXIT_VMREAD, BX_READ);
|
|
|
|
|
|
|
|
|
|
vmcs_pointer = BX_CPU_THIS_PTR vmcs.vmcs_linkptr;
|
|
|
|
|
}
|
2013-01-22 00:20:14 +04:00
|
|
|
|
|
|
|
|
|
if (CPL != 0) {
|
2013-12-03 00:06:59 +04:00
|
|
|
|
BX_ERROR(("%s: with CPL!=0 cause #GP(0)", i->getIaOpcodeNameShort()));
|
2013-01-22 00:20:14 +04:00
|
|
|
|
exception(BX_GP_EXCEPTION, 0);
|
2009-01-31 13:43:24 +03:00
|
|
|
|
}
|
2013-01-21 23:55:00 +04:00
|
|
|
|
|
|
|
|
|
if (vmcs_pointer == BX_INVALID_VMCSPTR) {
|
|
|
|
|
BX_ERROR(("VMFAIL: VMREAD with invalid VMCS ptr !"));
|
|
|
|
|
VMfailInvalid();
|
|
|
|
|
BX_NEXT_INSTR(i);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
unsigned encoding = BX_READ_32BIT_REG(i->src());
|
|
|
|
|
|
2015-07-06 21:46:57 +03:00
|
|
|
|
if (! BX_CPU_THIS_PTR vmcs_map->is_valid(encoding)) {
|
2013-01-21 23:55:00 +04:00
|
|
|
|
BX_ERROR(("VMREAD: not supported field 0x%08x", encoding));
|
|
|
|
|
VMfail(VMXERR_UNSUPPORTED_VMCS_COMPONENT_ACCESS);
|
|
|
|
|
BX_NEXT_INSTR(i);
|
|
|
|
|
}
|
|
|
|
|
|
2013-01-23 23:04:53 +04:00
|
|
|
|
Bit32u field_32;
|
|
|
|
|
#if BX_SUPPORT_VMX >= 2
|
|
|
|
|
if (BX_CPU_THIS_PTR in_vmx_guest)
|
|
|
|
|
field_32 = (Bit32u) vmread_shadow(encoding);
|
|
|
|
|
else
|
|
|
|
|
#endif
|
|
|
|
|
field_32 = (Bit32u) vmread(encoding);
|
2013-01-21 23:55:00 +04:00
|
|
|
|
|
|
|
|
|
if (i->modC0()) {
|
|
|
|
|
BX_WRITE_32BIT_REGZ(i->dst(), field_32);
|
|
|
|
|
}
|
|
|
|
|
else {
|
2015-05-17 00:06:59 +03:00
|
|
|
|
Bit32u eaddr = (Bit32u) BX_CPU_RESOLVE_ADDR(i);
|
2013-01-21 23:55:00 +04:00
|
|
|
|
write_virtual_dword_32(i->seg(), eaddr, field_32);
|
|
|
|
|
}
|
2009-01-31 13:43:24 +03:00
|
|
|
|
|
|
|
|
|
VMsucceed();
|
|
|
|
|
#endif
|
2011-07-07 00:01:18 +04:00
|
|
|
|
|
|
|
|
|
BX_NEXT_INSTR(i);
|
2009-01-31 13:43:24 +03:00
|
|
|
|
}
|
|
|
|
|
|
2013-01-21 23:55:00 +04:00
|
|
|
|
#if BX_SUPPORT_X86_64
|
|
|
|
|
|
|
|
|
|
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::VMREAD_EqGq(bxInstruction_c *i)
|
2009-01-31 13:43:24 +03:00
|
|
|
|
{
|
|
|
|
|
#if BX_SUPPORT_VMX
|
2010-04-08 21:00:55 +04:00
|
|
|
|
if (! BX_CPU_THIS_PTR in_vmx || ! protected_mode() || BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_COMPAT)
|
2010-03-14 18:51:27 +03:00
|
|
|
|
exception(BX_UD_EXCEPTION, 0);
|
2009-01-31 13:43:24 +03:00
|
|
|
|
|
2013-01-21 23:55:00 +04:00
|
|
|
|
bx_phy_address vmcs_pointer = BX_CPU_THIS_PTR vmcsptr;
|
|
|
|
|
|
2009-01-31 13:43:24 +03:00
|
|
|
|
if (BX_CPU_THIS_PTR in_vmx_guest) {
|
2013-01-23 23:04:53 +04:00
|
|
|
|
#if BX_SUPPORT_VMX >= 2
|
2013-01-21 23:55:00 +04:00
|
|
|
|
if (Vmexit_Vmread(i))
|
2013-01-23 23:04:53 +04:00
|
|
|
|
#endif
|
2013-01-21 23:55:00 +04:00
|
|
|
|
VMexit_Instruction(i, VMX_VMEXIT_VMREAD, BX_READ);
|
2009-01-31 13:43:24 +03:00
|
|
|
|
|
2013-01-21 23:55:00 +04:00
|
|
|
|
vmcs_pointer = BX_CPU_THIS_PTR vmcs.vmcs_linkptr;
|
|
|
|
|
}
|
2013-01-22 00:20:14 +04:00
|
|
|
|
|
|
|
|
|
if (CPL != 0) {
|
2013-12-03 00:06:59 +04:00
|
|
|
|
BX_ERROR(("%s: with CPL!=0 cause #GP(0)", i->getIaOpcodeNameShort()));
|
2013-01-22 00:20:14 +04:00
|
|
|
|
exception(BX_GP_EXCEPTION, 0);
|
2010-03-16 17:51:20 +03:00
|
|
|
|
}
|
2009-01-31 13:43:24 +03:00
|
|
|
|
|
2013-01-21 23:55:00 +04:00
|
|
|
|
if (vmcs_pointer == BX_INVALID_VMCSPTR) {
|
|
|
|
|
BX_ERROR(("VMFAIL: VMREAD with invalid VMCS ptr !"));
|
2009-01-31 13:43:24 +03:00
|
|
|
|
VMfailInvalid();
|
2011-07-07 00:01:18 +04:00
|
|
|
|
BX_NEXT_INSTR(i);
|
2009-01-31 13:43:24 +03:00
|
|
|
|
}
|
|
|
|
|
|
2013-01-21 23:55:00 +04:00
|
|
|
|
if (BX_READ_64BIT_REG_HIGH(i->src())) {
|
|
|
|
|
BX_ERROR(("VMREAD: not supported field (upper 32-bit not zero)"));
|
|
|
|
|
VMfail(VMXERR_UNSUPPORTED_VMCS_COMPONENT_ACCESS);
|
|
|
|
|
BX_NEXT_INSTR(i);
|
|
|
|
|
}
|
|
|
|
|
unsigned encoding = BX_READ_32BIT_REG(i->src());
|
2009-01-31 13:43:24 +03:00
|
|
|
|
|
2015-07-06 21:46:57 +03:00
|
|
|
|
if (! BX_CPU_THIS_PTR vmcs_map->is_valid(encoding)) {
|
2013-01-21 23:55:00 +04:00
|
|
|
|
BX_ERROR(("VMREAD: not supported field 0x%08x", encoding));
|
|
|
|
|
VMfail(VMXERR_UNSUPPORTED_VMCS_COMPONENT_ACCESS);
|
|
|
|
|
BX_NEXT_INSTR(i);
|
|
|
|
|
}
|
2009-10-08 18:33:08 +04:00
|
|
|
|
|
2013-01-23 23:04:53 +04:00
|
|
|
|
Bit64u field_64;
|
|
|
|
|
#if BX_SUPPORT_VMX >= 2
|
|
|
|
|
if (BX_CPU_THIS_PTR in_vmx_guest)
|
|
|
|
|
field_64 = vmread_shadow(encoding);
|
|
|
|
|
else
|
|
|
|
|
#endif
|
|
|
|
|
field_64 = vmread(encoding);
|
2010-11-12 23:46:59 +03:00
|
|
|
|
|
2013-01-21 23:55:00 +04:00
|
|
|
|
if (i->modC0()) {
|
|
|
|
|
BX_WRITE_64BIT_REG(i->dst(), field_64);
|
2009-01-31 13:43:24 +03:00
|
|
|
|
}
|
2013-01-21 23:55:00 +04:00
|
|
|
|
else {
|
2015-05-17 00:06:59 +03:00
|
|
|
|
Bit64u eaddr = BX_CPU_RESOLVE_ADDR(i);
|
2014-10-21 01:08:29 +04:00
|
|
|
|
write_linear_qword(i->seg(), get_laddr64(i->seg(), eaddr), field_64);
|
2013-01-21 23:55:00 +04:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
VMsucceed();
|
|
|
|
|
#endif
|
|
|
|
|
|
|
|
|
|
BX_NEXT_INSTR(i);
|
|
|
|
|
}
|
|
|
|
|
|
2009-01-31 13:43:24 +03:00
|
|
|
|
#endif
|
2013-01-21 23:55:00 +04:00
|
|
|
|
|
|
|
|
|
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::VMWRITE_GdEd(bxInstruction_c *i)
|
|
|
|
|
{
|
|
|
|
|
#if BX_SUPPORT_VMX
|
|
|
|
|
if (! BX_CPU_THIS_PTR in_vmx || ! protected_mode() || BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_COMPAT)
|
|
|
|
|
exception(BX_UD_EXCEPTION, 0);
|
|
|
|
|
|
|
|
|
|
bx_phy_address vmcs_pointer = BX_CPU_THIS_PTR vmcsptr;
|
|
|
|
|
|
|
|
|
|
if (BX_CPU_THIS_PTR in_vmx_guest) {
|
2013-01-23 23:04:53 +04:00
|
|
|
|
#if BX_SUPPORT_VMX >= 2
|
2013-01-21 23:55:00 +04:00
|
|
|
|
if (Vmexit_Vmwrite(i))
|
2013-01-23 23:04:53 +04:00
|
|
|
|
#endif
|
2013-01-21 23:55:00 +04:00
|
|
|
|
VMexit_Instruction(i, VMX_VMEXIT_VMWRITE, BX_WRITE);
|
|
|
|
|
|
|
|
|
|
vmcs_pointer = BX_CPU_THIS_PTR vmcs.vmcs_linkptr;
|
|
|
|
|
}
|
2013-01-22 00:20:14 +04:00
|
|
|
|
|
|
|
|
|
if (CPL != 0) {
|
2013-12-03 00:06:59 +04:00
|
|
|
|
BX_ERROR(("%s: with CPL!=0 cause #GP(0)", i->getIaOpcodeNameShort()));
|
2013-01-22 00:20:14 +04:00
|
|
|
|
exception(BX_GP_EXCEPTION, 0);
|
2013-01-21 23:55:00 +04:00
|
|
|
|
}
|
2009-01-31 13:43:24 +03:00
|
|
|
|
|
2013-01-21 23:55:00 +04:00
|
|
|
|
if (vmcs_pointer == BX_INVALID_VMCSPTR) {
|
|
|
|
|
BX_ERROR(("VMFAIL: VMWRITE with invalid VMCS ptr !"));
|
|
|
|
|
VMfailInvalid();
|
|
|
|
|
BX_NEXT_INSTR(i);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
Bit32u val_32;
|
|
|
|
|
|
|
|
|
|
if (i->modC0()) {
|
|
|
|
|
val_32 = BX_READ_32BIT_REG(i->src());
|
|
|
|
|
}
|
|
|
|
|
else {
|
2015-05-17 00:06:59 +03:00
|
|
|
|
Bit32u eaddr = (Bit32u) BX_CPU_RESOLVE_ADDR(i);
|
2013-01-21 23:55:00 +04:00
|
|
|
|
val_32 = read_virtual_dword_32(i->seg(), eaddr);
|
2009-01-31 13:43:24 +03:00
|
|
|
|
}
|
2010-11-11 19:25:45 +03:00
|
|
|
|
|
2012-08-05 17:52:40 +04:00
|
|
|
|
Bit32u encoding = BX_READ_32BIT_REG(i->dst());
|
2010-11-12 23:46:59 +03:00
|
|
|
|
|
2015-07-06 21:46:57 +03:00
|
|
|
|
if (! BX_CPU_THIS_PTR vmcs_map->is_valid(encoding)) {
|
2010-11-12 00:41:03 +03:00
|
|
|
|
BX_ERROR(("VMWRITE: not supported field 0x%08x", encoding));
|
2010-11-11 19:25:45 +03:00
|
|
|
|
VMfail(VMXERR_UNSUPPORTED_VMCS_COMPONENT_ACCESS);
|
2011-07-07 00:01:18 +04:00
|
|
|
|
BX_NEXT_INSTR(i);
|
2010-11-11 19:25:45 +03:00
|
|
|
|
}
|
|
|
|
|
|
2009-01-31 13:43:24 +03:00
|
|
|
|
if (VMCS_FIELD_TYPE(encoding) == VMCS_FIELD_TYPE_READ_ONLY)
|
|
|
|
|
{
|
2013-01-21 23:55:00 +04:00
|
|
|
|
if ((VMX_MSR_MISC & VMX_MISC_SUPPORT_VMWRITE_READ_ONLY_FIELDS) == 0) {
|
|
|
|
|
BX_ERROR(("VMWRITE: write to read only field 0x%08x", encoding));
|
|
|
|
|
VMfail(VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT);
|
|
|
|
|
BX_NEXT_INSTR(i);
|
|
|
|
|
}
|
2009-01-31 13:43:24 +03:00
|
|
|
|
}
|
|
|
|
|
|
2013-01-23 23:04:53 +04:00
|
|
|
|
#if BX_SUPPORT_VMX >= 2
|
2013-01-21 23:55:00 +04:00
|
|
|
|
if (BX_CPU_THIS_PTR in_vmx_guest)
|
|
|
|
|
vmwrite_shadow(encoding, (Bit64u) val_32);
|
2013-01-22 12:39:41 +04:00
|
|
|
|
else
|
2013-01-23 23:04:53 +04:00
|
|
|
|
#endif
|
2013-01-22 12:39:41 +04:00
|
|
|
|
vmwrite(encoding, (Bit64u) val_32);
|
2009-01-31 13:43:24 +03:00
|
|
|
|
|
2013-01-21 23:55:00 +04:00
|
|
|
|
VMsucceed();
|
|
|
|
|
#endif
|
|
|
|
|
|
|
|
|
|
BX_NEXT_INSTR(i);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
#if BX_SUPPORT_X86_64
|
|
|
|
|
|
|
|
|
|
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::VMWRITE_GqEq(bxInstruction_c *i)
|
|
|
|
|
{
|
|
|
|
|
#if BX_SUPPORT_VMX
|
|
|
|
|
if (! BX_CPU_THIS_PTR in_vmx || ! protected_mode() || BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_COMPAT)
|
|
|
|
|
exception(BX_UD_EXCEPTION, 0);
|
|
|
|
|
|
|
|
|
|
bx_phy_address vmcs_pointer = BX_CPU_THIS_PTR vmcsptr;
|
|
|
|
|
|
|
|
|
|
if (BX_CPU_THIS_PTR in_vmx_guest) {
|
2013-01-23 23:04:53 +04:00
|
|
|
|
#if BX_SUPPORT_VMX >= 2
|
2013-01-21 23:55:00 +04:00
|
|
|
|
if (Vmexit_Vmwrite(i))
|
2013-01-23 23:04:53 +04:00
|
|
|
|
#endif
|
2013-01-21 23:55:00 +04:00
|
|
|
|
VMexit_Instruction(i, VMX_VMEXIT_VMWRITE, BX_WRITE);
|
|
|
|
|
|
|
|
|
|
vmcs_pointer = BX_CPU_THIS_PTR vmcs.vmcs_linkptr;
|
2010-11-12 23:26:01 +03:00
|
|
|
|
}
|
2013-01-22 00:20:14 +04:00
|
|
|
|
|
|
|
|
|
if (CPL != 0) {
|
2013-12-03 00:06:59 +04:00
|
|
|
|
BX_ERROR(("%s: with CPL!=0 cause #GP(0)", i->getIaOpcodeNameShort()));
|
2013-01-22 00:20:14 +04:00
|
|
|
|
exception(BX_GP_EXCEPTION, 0);
|
2010-11-12 23:26:01 +03:00
|
|
|
|
}
|
2013-01-21 23:55:00 +04:00
|
|
|
|
|
|
|
|
|
if (vmcs_pointer == BX_INVALID_VMCSPTR) {
|
|
|
|
|
BX_ERROR(("VMFAIL: VMWRITE with invalid VMCS ptr !"));
|
|
|
|
|
VMfailInvalid();
|
|
|
|
|
BX_NEXT_INSTR(i);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
Bit64u val_64;
|
|
|
|
|
|
|
|
|
|
if (i->modC0()) {
|
|
|
|
|
val_64 = BX_READ_64BIT_REG(i->src());
|
2010-11-12 23:26:01 +03:00
|
|
|
|
}
|
2011-07-22 00:58:54 +04:00
|
|
|
|
else {
|
2015-05-17 00:06:59 +03:00
|
|
|
|
Bit64u eaddr = BX_CPU_RESOLVE_ADDR(i);
|
2014-10-21 01:08:29 +04:00
|
|
|
|
val_64 = read_linear_qword(i->seg(), get_laddr64(i->seg(), eaddr));
|
2013-01-21 23:55:00 +04:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (BX_READ_64BIT_REG_HIGH(i->dst())) {
|
|
|
|
|
BX_ERROR(("VMWRITE: not supported field (upper 32-bit not zero)"));
|
|
|
|
|
VMfail(VMXERR_UNSUPPORTED_VMCS_COMPONENT_ACCESS);
|
|
|
|
|
BX_NEXT_INSTR(i);
|
2011-07-22 00:58:54 +04:00
|
|
|
|
}
|
2009-01-31 13:43:24 +03:00
|
|
|
|
|
2013-01-21 23:55:00 +04:00
|
|
|
|
Bit32u encoding = BX_READ_32BIT_REG(i->dst());
|
|
|
|
|
|
2015-07-06 21:46:57 +03:00
|
|
|
|
if (! BX_CPU_THIS_PTR vmcs_map->is_valid(encoding)) {
|
2013-01-21 23:55:00 +04:00
|
|
|
|
BX_ERROR(("VMWRITE: not supported field 0x%08x", encoding));
|
|
|
|
|
VMfail(VMXERR_UNSUPPORTED_VMCS_COMPONENT_ACCESS);
|
|
|
|
|
BX_NEXT_INSTR(i);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (VMCS_FIELD_TYPE(encoding) == VMCS_FIELD_TYPE_READ_ONLY)
|
|
|
|
|
{
|
|
|
|
|
if ((VMX_MSR_MISC & VMX_MISC_SUPPORT_VMWRITE_READ_ONLY_FIELDS) == 0) {
|
|
|
|
|
BX_ERROR(("VMWRITE: write to read only field 0x%08x", encoding));
|
|
|
|
|
VMfail(VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT);
|
|
|
|
|
BX_NEXT_INSTR(i);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2013-01-23 23:04:53 +04:00
|
|
|
|
#if BX_SUPPORT_VMX >= 2
|
2013-01-21 23:55:00 +04:00
|
|
|
|
if (BX_CPU_THIS_PTR in_vmx_guest)
|
|
|
|
|
vmwrite_shadow(encoding, val_64);
|
2013-01-22 12:39:41 +04:00
|
|
|
|
else
|
2013-01-23 23:04:53 +04:00
|
|
|
|
#endif
|
2013-01-22 12:39:41 +04:00
|
|
|
|
vmwrite(encoding, val_64);
|
2013-01-21 23:55:00 +04:00
|
|
|
|
|
2009-01-31 13:43:24 +03:00
|
|
|
|
VMsucceed();
|
|
|
|
|
#endif
|
2011-07-07 00:01:18 +04:00
|
|
|
|
|
|
|
|
|
BX_NEXT_INSTR(i);
|
2009-01-31 13:43:24 +03:00
|
|
|
|
}
|
|
|
|
|
|
2013-01-21 23:55:00 +04:00
|
|
|
|
#endif
|
|
|
|
|
|
2011-07-07 00:01:18 +04:00
|
|
|
|
BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::VMCLEAR(bxInstruction_c *i)
|
2009-01-31 13:43:24 +03:00
|
|
|
|
{
|
|
|
|
|
#if BX_SUPPORT_VMX
|
2010-04-08 21:00:55 +04:00
|
|
|
|
if (! BX_CPU_THIS_PTR in_vmx || ! protected_mode() || BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_COMPAT)
|
2010-03-14 18:51:27 +03:00
|
|
|
|
exception(BX_UD_EXCEPTION, 0);
|
2009-01-31 13:43:24 +03:00
|
|
|
|
|
|
|
|
|
if (BX_CPU_THIS_PTR in_vmx_guest) {
|
|
|
|
|
VMexit_Instruction(i, VMX_VMEXIT_VMCLEAR);
|
|
|
|
|
}
|
|
|
|
|
|
2010-03-16 17:51:20 +03:00
|
|
|
|
if (CPL != 0) {
|
2013-12-03 00:06:59 +04:00
|
|
|
|
BX_ERROR(("%s: with CPL!=0 cause #GP(0)", i->getIaOpcodeNameShort()));
|
2010-03-14 18:51:27 +03:00
|
|
|
|
exception(BX_GP_EXCEPTION, 0);
|
2010-03-16 17:51:20 +03:00
|
|
|
|
}
|
2009-01-31 13:43:24 +03:00
|
|
|
|
|
2015-05-17 00:06:59 +03:00
|
|
|
|
bx_address eaddr = BX_CPU_RESOLVE_ADDR(i);
|
2009-01-31 13:43:24 +03:00
|
|
|
|
Bit64u pAddr = read_virtual_qword(i->seg(), eaddr); // keep 64-bit
|
2012-12-27 01:59:16 +04:00
|
|
|
|
if (! IsValidPageAlignedPhyAddr(pAddr)) {
|
2009-05-21 14:39:40 +04:00
|
|
|
|
BX_ERROR(("VMFAIL: VMCLEAR with invalid physical address!"));
|
2009-01-31 13:43:24 +03:00
|
|
|
|
VMfail(VMXERR_VMCLEAR_WITH_INVALID_ADDR);
|
2011-07-07 00:01:18 +04:00
|
|
|
|
BX_NEXT_INSTR(i);
|
2009-01-31 13:43:24 +03:00
|
|
|
|
}
|
|
|
|
|
|
2009-05-28 12:26:17 +04:00
|
|
|
|
if (pAddr == BX_CPU_THIS_PTR vmxonptr) {
|
|
|
|
|
BX_ERROR(("VMFAIL: VMLEAR with VMXON ptr !"));
|
2009-01-31 13:43:24 +03:00
|
|
|
|
VMfail(VMXERR_VMCLEAR_WITH_VMXON_VMCS_PTR);
|
|
|
|
|
}
|
|
|
|
|
else {
|
|
|
|
|
// ensure that data for VMCS referenced by the operand is in memory
|
|
|
|
|
// initialize implementation-specific data in VMCS region
|
|
|
|
|
|
|
|
|
|
// clear VMCS launch state
|
|
|
|
|
Bit32u launch_state = VMCS_STATE_CLEAR;
|
2009-02-17 22:20:47 +03:00
|
|
|
|
access_write_physical(pAddr + VMCS_LAUNCH_STATE_FIELD_ADDR, 4, &launch_state);
|
2012-06-18 15:41:26 +04:00
|
|
|
|
BX_NOTIFY_PHY_MEMORY_ACCESS(pAddr + VMCS_LAUNCH_STATE_FIELD_ADDR, 4,
|
2015-02-23 00:26:26 +03:00
|
|
|
|
MEMTYPE(BX_CPU_THIS_PTR vmcs_memtype), BX_WRITE, BX_VMCS_ACCESS, (Bit8u*)(&launch_state));
|
2009-01-31 13:43:24 +03:00
|
|
|
|
|
2009-05-03 17:02:14 +04:00
|
|
|
|
if (pAddr == BX_CPU_THIS_PTR vmcsptr) {
|
2009-01-31 13:43:24 +03:00
|
|
|
|
BX_CPU_THIS_PTR vmcsptr = BX_INVALID_VMCSPTR;
|
2009-05-03 17:02:14 +04:00
|
|
|
|
BX_CPU_THIS_PTR vmcshostptr = 0;
|
|
|
|
|
}
|
2009-01-31 13:43:24 +03:00
|
|
|
|
|
|
|
|
|
VMsucceed();
|
|
|
|
|
}
|
|
|
|
|
#endif
|
2011-07-07 00:01:18 +04:00
|
|
|
|
|
|
|
|
|
BX_NEXT_INSTR(i);
|
2009-01-31 13:43:24 +03:00
|
|
|
|
}
|
|
|
|
|
|
2011-06-26 23:15:30 +04:00
|
|
|
|

#if BX_CPU_LEVEL >= 6

BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::INVEPT(bxInstruction_c *i)
{
#if BX_SUPPORT_VMX >= 2
  // If CR0.PE = 0, the following instructions cause invalid-opcode exceptions
  // and do not cause VM exits: INVEPT, INVVPID, LLDT, LTR, SLDT, STR, VMCLEAR,
  // VMLAUNCH, VMPTRLD, VMPTRST, VMREAD, VMRESUME, VMWRITE, VMXOFF and VMXON.
  if (! BX_CPU_THIS_PTR in_vmx || ! protected_mode() || BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_COMPAT)
    exception(BX_UD_EXCEPTION, 0);

  if (BX_CPU_THIS_PTR in_vmx_guest) {
    VMexit_Instruction(i, VMX_VMEXIT_INVEPT, BX_READ);
  }

  if (CPL != 0) {
    BX_ERROR(("%s: with CPL!=0 cause #GP(0)", i->getIaOpcodeNameShort()));
    exception(BX_GP_EXCEPTION, 0);
  }

  bx_address type;
  if (i->os64L()) {
    type = BX_READ_64BIT_REG(i->dst());
  }
  else {
    type = BX_READ_32BIT_REG(i->dst());
  }
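
  // The INVEPT descriptor is a 128-bit in-memory operand; as used below,
  // bits 63:0 carry the EPT pointer (checked only for single-context
  // invalidation) and bits 127:64 are reserved. Illustrative layout:
  //
  //   struct invept_descriptor {   // for documentation only, not used here
  //     Bit64u eptp;               // bits 63:0   - EPT pointer
  //     Bit64u reserved;           // bits 127:64
  //   };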
  BxPackedXmmRegister inv_eptp;
  bx_address eaddr = BX_CPU_RESOLVE_ADDR(i);
  read_virtual_xmmword(i->seg(), eaddr, &inv_eptp);
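
  // Only single-context and all-context invalidation are defined for INVEPT;
  // both are conservatively approximated below by a full TLB_flush().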
  switch(type) {
  case BX_INVEPT_INVVPID_SINGLE_CONTEXT_INVALIDATION:
    if (! is_eptptr_valid(inv_eptp.xmm64u(0))) {
      BX_ERROR(("INVEPT: invalid EPTPTR value !"));
      VMfail(VMXERR_INVALID_INVEPT_INVVPID);
      BX_NEXT_TRACE(i);
    }
    TLB_flush(); // Invalidate mappings associated with EPTP[51:12]
    break;

  case BX_INVEPT_INVVPID_ALL_CONTEXT_INVALIDATION:
    TLB_flush(); // Invalidate mappings associated with all EPTPs
    break;

  default:
    BX_ERROR(("INVEPT: not supported type !"));
    VMfail(VMXERR_INVALID_INVEPT_INVVPID);
    BX_NEXT_TRACE(i);
  }

  BX_INSTR_TLB_CNTRL(BX_CPU_ID, BX_INSTR_INVEPT, type);

  VMsucceed();
#else
  BX_INFO(("INVEPT: required VMXx2 support, use --enable-vmx=2 option"));
  exception(BX_UD_EXCEPTION, 0);
#endif

  BX_NEXT_TRACE(i);
}

BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::INVVPID(bxInstruction_c *i)
{
#if BX_SUPPORT_VMX >= 2
  // If CR0.PE = 0, the following instructions cause invalid-opcode exceptions
  // and do not cause VM exits: INVEPT, INVVPID, LLDT, LTR, SLDT, STR, VMCLEAR,
  // VMLAUNCH, VMPTRLD, VMPTRST, VMREAD, VMRESUME, VMWRITE, VMXOFF and VMXON.
  if (! BX_CPU_THIS_PTR in_vmx || ! protected_mode() || BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_COMPAT)
    exception(BX_UD_EXCEPTION, 0);

  if (BX_CPU_THIS_PTR in_vmx_guest) {
    VMexit_Instruction(i, VMX_VMEXIT_INVVPID, BX_READ);
  }

  if (CPL != 0) {
    BX_ERROR(("%s: with CPL!=0 cause #GP(0)", i->getIaOpcodeNameShort()));
    exception(BX_GP_EXCEPTION, 0);
  }

  bx_address type;
  if (i->os64L()) {
    type = BX_READ_64BIT_REG(i->dst());
  }
  else {
    type = BX_READ_32BIT_REG(i->dst());
  }
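
  // The INVVPID descriptor is a 128-bit in-memory operand; as used below,
  // bits 15:0 hold the VPID, bits 63:16 are reserved (must be zero) and
  // bits 127:64 hold the linear address used by individual-address
  // invalidation. Illustrative layout:
  //
  //   struct invvpid_descriptor {    // for documentation only, not used here
  //     Bit16u vpid;                 // bits 15:0
  //     Bit16u reserved[3];          // bits 63:16 - must be zero
  //     Bit64u linear_address;       // bits 127:64
  //   };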
  BxPackedXmmRegister invvpid_desc;
  bx_address eaddr = BX_CPU_RESOLVE_ADDR(i);
  read_virtual_xmmword(i->seg(), eaddr, &invvpid_desc);

  if (invvpid_desc.xmm64u(0) > 0xffff) {
    BX_ERROR(("INVVPID: INVVPID_DESC reserved bits set"));
    VMfail(VMXERR_INVALID_INVEPT_INVVPID);
    BX_NEXT_TRACE(i);
  }

  Bit16u vpid = invvpid_desc.xmm16u(0);
  if (vpid == 0 && type != BX_INVEPT_INVVPID_ALL_CONTEXT_INVALIDATION) {
    BX_ERROR(("INVVPID with VPID=0"));
    VMfail(VMXERR_INVALID_INVEPT_INVVPID);
    BX_NEXT_TRACE(i);
  }
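
  // Four INVVPID types are handled below: individual-address, single-context,
  // all-context and single-context-retaining-globals. Each is conservatively
  // approximated by a full TLB_flush() or TLB_flushNonGlobal().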
  switch(type) {
  case BX_INVEPT_INVVPID_INDIVIDUAL_ADDRESS_INVALIDATION:
    if (! IsCanonical(invvpid_desc.xmm64u(1))) {
      BX_ERROR(("INVVPID: non canonical LADDR single context invalidation"));
      VMfail(VMXERR_INVALID_INVEPT_INVVPID);
      BX_NEXT_TRACE(i);
    }
    TLB_flush(); // invalidate all mappings for address LADDR tagged with VPID
    break;

  case BX_INVEPT_INVVPID_SINGLE_CONTEXT_INVALIDATION:
    TLB_flush(); // invalidate all mappings tagged with VPID
    break;

  case BX_INVEPT_INVVPID_ALL_CONTEXT_INVALIDATION:
    TLB_flush(); // invalidate all mappings tagged with VPID <> 0
    break;

  case BX_INVEPT_INVVPID_SINGLE_CONTEXT_NON_GLOBAL_INVALIDATION:
    TLB_flushNonGlobal(); // invalidate all mappings tagged with VPID except globals
    break;

  default:
    BX_ERROR(("INVVPID: not supported type !"));
    VMfail(VMXERR_INVALID_INVEPT_INVVPID);
    BX_NEXT_TRACE(i);
  }

  BX_INSTR_TLB_CNTRL(BX_CPU_ID, BX_INSTR_INVVPID, type);

  VMsucceed();
#else
  BX_INFO(("INVVPID: required VMXx2 support, use --enable-vmx=2 option"));
  exception(BX_UD_EXCEPTION, 0);
#endif

  BX_NEXT_TRACE(i);
}

BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::INVPCID(bxInstruction_c *i)
{
  if (v8086_mode()) {
    BX_ERROR(("INVPCID: #GP - not recognized in v8086 mode"));
    exception(BX_GP_EXCEPTION, 0);
  }

#if BX_SUPPORT_VMX
  // INVPCID will always #UD in legacy VMX mode
  if (BX_CPU_THIS_PTR in_vmx_guest) {
    if (! SECONDARY_VMEXEC_CONTROL(VMX_VM_EXEC_CTRL3_INVPCID)) {
      BX_ERROR(("INVPCID in VMX guest: not allowed to use instruction !"));
      exception(BX_UD_EXCEPTION, 0);
    }

#if BX_SUPPORT_VMX >= 2
    if (VMEXIT(VMX_VM_EXEC_CTRL2_INVLPG_VMEXIT)) {
      VMexit_Instruction(i, VMX_VMEXIT_INVPCID, BX_READ);
    }
#endif
  }
#endif

  if (CPL != 0) {
    BX_ERROR(("%s: with CPL!=0 cause #GP(0)", i->getIaOpcodeNameShort()));
    exception(BX_GP_EXCEPTION, 0);
  }

  bx_address type;
#if BX_SUPPORT_X86_64
  if (i->os64L()) {
    type = BX_READ_64BIT_REG(i->dst());
  }
  else
#endif
  {
    type = BX_READ_32BIT_REG(i->dst());
  }
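
  // The INVPCID descriptor is a 128-bit in-memory operand; as used below,
  // bits 11:0 hold the PCID, bits 63:12 are reserved (must be zero) and
  // bits 127:64 hold the linear address used by individual-address
  // invalidation. Illustrative layout:
  //
  //   struct invpcid_descriptor {    // for documentation only, not used here
  //     Bit64u pcid     : 12;        // bits 11:0
  //     Bit64u reserved : 52;        // bits 63:12 - must be zero
  //     Bit64u linear_address;       // bits 127:64
  //   };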
  BxPackedXmmRegister invpcid_desc;
  bx_address eaddr = BX_CPU_RESOLVE_ADDR(i);
  read_virtual_xmmword(i->seg(), eaddr, &invpcid_desc);

  if (invpcid_desc.xmm64u(0) > 0xfff) {
    BX_ERROR(("INVPCID: INVPCID_DESC reserved bits set"));
    exception(BX_GP_EXCEPTION, 0);
  }

  Bit16u pcid = invpcid_desc.xmm16u(0) & 0xfff;
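
  // Four INVPCID types are handled below. Unlike INVEPT/INVVPID, bad operands
  // raise #GP(0) rather than VMfail; the invalidations themselves are
  // conservatively approximated by TLB_flush() or TLB_flushNonGlobal().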
  switch(type) {
  case BX_INVPCID_INDIVIDUAL_ADDRESS_NON_GLOBAL_INVALIDATION:
#if BX_SUPPORT_X86_64
    if (! IsCanonical(invpcid_desc.xmm64u(1))) {
      BX_ERROR(("INVPCID: non canonical LADDR single context invalidation"));
      exception(BX_GP_EXCEPTION, 0);
    }
#endif
    if (! BX_CPU_THIS_PTR cr4.get_PCIDE() && pcid != 0) {
      BX_ERROR(("INVPCID: invalid PCID"));
      exception(BX_GP_EXCEPTION, 0);
    }
    TLB_flushNonGlobal(); // Invalidate all mappings for LADDR tagged with PCID except globals
    break;

  case BX_INVPCID_SINGLE_CONTEXT_NON_GLOBAL_INVALIDATION:
    if (! BX_CPU_THIS_PTR cr4.get_PCIDE() && pcid != 0) {
      BX_ERROR(("INVPCID: invalid PCID"));
      exception(BX_GP_EXCEPTION, 0);
    }
    TLB_flushNonGlobal(); // Invalidate all mappings tagged with PCID except globals
    break;

  case BX_INVPCID_ALL_CONTEXT_INVALIDATION:
    TLB_flush(); // Invalidate all mappings tagged with any PCID
    break;

  case BX_INVPCID_ALL_CONTEXT_NON_GLOBAL_INVALIDATION:
    TLB_flushNonGlobal(); // Invalidate all mappings tagged with any PCID except globals
    break;

  default:
    BX_ERROR(("INVPCID: not supported type !"));
    exception(BX_GP_EXCEPTION, 0);
  }

  BX_INSTR_TLB_CNTRL(BX_CPU_ID, BX_INSTR_INVPCID, type);

  BX_NEXT_TRACE(i);
}

#endif

BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::GETSEC(bxInstruction_c *i)
{
#if BX_CPU_LEVEL >= 6
  if (! BX_CPU_THIS_PTR cr4.get_SMXE())
    exception(BX_UD_EXCEPTION, 0);

#if BX_SUPPORT_VMX
  if (BX_CPU_THIS_PTR in_vmx_guest) {
    VMexit(VMX_VMEXIT_GETSEC, 0);
  }
#endif

  BX_PANIC(("GETSEC: SMX is not implemented yet !"));
#endif

  BX_NEXT_TRACE(i);
}
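
// register_vmx_state() below registers the VMX architectural state and the
// cached VMCS fields in the Bochs save/restore parameter tree so that the
// virtualization state survives a save/restore cycle.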

#if BX_SUPPORT_VMX
void BX_CPU_C::register_vmx_state(bx_param_c *parent)
{
  if (! is_cpu_extension_supported(BX_ISA_VMX)) return;

  // register VMX state for save/restore param tree
  bx_list_c *vmx = new bx_list_c(parent, "VMX");

  BXRS_HEX_PARAM_FIELD(vmx, vmcsptr, BX_CPU_THIS_PTR vmcsptr);
  BXRS_HEX_PARAM_FIELD(vmx, vmxonptr, BX_CPU_THIS_PTR vmxonptr);
  BXRS_PARAM_BOOL(vmx, in_vmx, BX_CPU_THIS_PTR in_vmx);
  BXRS_PARAM_BOOL(vmx, in_vmx_guest, BX_CPU_THIS_PTR in_vmx_guest);
  BXRS_PARAM_BOOL(vmx, in_smm_vmx, BX_CPU_THIS_PTR in_smm_vmx);
  BXRS_PARAM_BOOL(vmx, in_smm_vmx_guest, BX_CPU_THIS_PTR in_smm_vmx_guest);

  bx_list_c *vmcache = new bx_list_c(vmx, "VMCS_CACHE");

  //
  // VM-Execution Control Fields
  //

  bx_list_c *vmexec_ctrls = new bx_list_c(vmcache, "VMEXEC_CTRLS");

  BXRS_HEX_PARAM_FIELD(vmexec_ctrls, vmexec_ctrls1, BX_CPU_THIS_PTR vmcs.vmexec_ctrls1);
  BXRS_HEX_PARAM_FIELD(vmexec_ctrls, vmexec_ctrls2, BX_CPU_THIS_PTR vmcs.vmexec_ctrls2);
  BXRS_HEX_PARAM_FIELD(vmexec_ctrls, vmexec_ctrls3, BX_CPU_THIS_PTR vmcs.vmexec_ctrls3);
  BXRS_HEX_PARAM_FIELD(vmexec_ctrls, vm_exceptions_bitmap, BX_CPU_THIS_PTR vmcs.vm_exceptions_bitmap);
  BXRS_HEX_PARAM_FIELD(vmexec_ctrls, tsc_multiplier, BX_CPU_THIS_PTR vmcs.tsc_multiplier);
  BXRS_HEX_PARAM_FIELD(vmexec_ctrls, vm_pf_mask, BX_CPU_THIS_PTR vmcs.vm_pf_mask);
  BXRS_HEX_PARAM_FIELD(vmexec_ctrls, vm_pf_match, BX_CPU_THIS_PTR vmcs.vm_pf_match);
  BXRS_HEX_PARAM_FIELD(vmexec_ctrls, io_bitmap_addr1, BX_CPU_THIS_PTR vmcs.io_bitmap_addr[0]);
  BXRS_HEX_PARAM_FIELD(vmexec_ctrls, io_bitmap_addr2, BX_CPU_THIS_PTR vmcs.io_bitmap_addr[1]);
  BXRS_HEX_PARAM_FIELD(vmexec_ctrls, msr_bitmap_addr, BX_CPU_THIS_PTR vmcs.msr_bitmap_addr);
  BXRS_HEX_PARAM_FIELD(vmexec_ctrls, vm_cr0_mask, BX_CPU_THIS_PTR vmcs.vm_cr0_mask);
  BXRS_HEX_PARAM_FIELD(vmexec_ctrls, vm_cr0_read_shadow, BX_CPU_THIS_PTR vmcs.vm_cr0_read_shadow);
  BXRS_HEX_PARAM_FIELD(vmexec_ctrls, vm_cr4_mask, BX_CPU_THIS_PTR vmcs.vm_cr4_mask);
  BXRS_HEX_PARAM_FIELD(vmexec_ctrls, vm_cr4_read_shadow, BX_CPU_THIS_PTR vmcs.vm_cr4_read_shadow);
  BXRS_DEC_PARAM_FIELD(vmexec_ctrls, vm_cr3_target_cnt, BX_CPU_THIS_PTR vmcs.vm_cr3_target_cnt);
  BXRS_HEX_PARAM_FIELD(vmexec_ctrls, vm_cr3_target_value1, BX_CPU_THIS_PTR vmcs.vm_cr3_target_value[0]);
  BXRS_HEX_PARAM_FIELD(vmexec_ctrls, vm_cr3_target_value2, BX_CPU_THIS_PTR vmcs.vm_cr3_target_value[1]);
  BXRS_HEX_PARAM_FIELD(vmexec_ctrls, vm_cr3_target_value3, BX_CPU_THIS_PTR vmcs.vm_cr3_target_value[2]);
  BXRS_HEX_PARAM_FIELD(vmexec_ctrls, vm_cr3_target_value4, BX_CPU_THIS_PTR vmcs.vm_cr3_target_value[3]);
  BXRS_HEX_PARAM_FIELD(vmexec_ctrls, vmcs_linkptr, BX_CPU_THIS_PTR vmcs.vmcs_linkptr);
#if BX_SUPPORT_X86_64
  BXRS_HEX_PARAM_FIELD(vmexec_ctrls, virtual_apic_page_addr, BX_CPU_THIS_PTR vmcs.virtual_apic_page_addr);
  BXRS_HEX_PARAM_FIELD(vmexec_ctrls, vm_tpr_threshold, BX_CPU_THIS_PTR vmcs.vm_tpr_threshold);
  BXRS_HEX_PARAM_FIELD(vmexec_ctrls, apic_access_page, BX_CPU_THIS_PTR vmcs.apic_access_page);
  BXRS_HEX_PARAM_FIELD(vmexec_ctrls, apic_access, BX_CPU_THIS_PTR vmcs.apic_access);
#endif
#if BX_SUPPORT_VMX >= 2
  BXRS_HEX_PARAM_FIELD(vmexec_ctrls, eptptr, BX_CPU_THIS_PTR vmcs.eptptr);
  BXRS_HEX_PARAM_FIELD(vmexec_ctrls, vpid, BX_CPU_THIS_PTR vmcs.vpid);
  BXRS_HEX_PARAM_FIELD(vmexec_ctrls, pml_address, BX_CPU_THIS_PTR vmcs.pml_address);
  BXRS_HEX_PARAM_FIELD(vmexec_ctrls, pml_index, BX_CPU_THIS_PTR vmcs.pml_index);
#endif
#if BX_SUPPORT_VMX >= 2
  BXRS_HEX_PARAM_FIELD(vmexec_ctrls, pause_loop_exiting_gap, BX_CPU_THIS_PTR vmcs.ple.pause_loop_exiting_gap);
  BXRS_HEX_PARAM_FIELD(vmexec_ctrls, pause_loop_exiting_window, BX_CPU_THIS_PTR vmcs.ple.pause_loop_exiting_window);
  BXRS_HEX_PARAM_FIELD(vmexec_ctrls, first_pause_time, BX_CPU_THIS_PTR vmcs.ple.first_pause_time);
  BXRS_HEX_PARAM_FIELD(vmexec_ctrls, last_pause_time, BX_CPU_THIS_PTR vmcs.ple.last_pause_time);
#endif
#if BX_SUPPORT_VMX >= 2
  BXRS_HEX_PARAM_FIELD(vmexec_ctrls, svi, BX_CPU_THIS_PTR vmcs.svi);
  BXRS_HEX_PARAM_FIELD(vmexec_ctrls, rvi, BX_CPU_THIS_PTR vmcs.rvi);
  BXRS_HEX_PARAM_FIELD(vmexec_ctrls, vppr, BX_CPU_THIS_PTR vmcs.vppr);
  BXRS_HEX_PARAM_FIELD(vmexec_ctrls, eoi_exit_bitmap0, BX_CPU_THIS_PTR vmcs.eoi_exit_bitmap[0]);
  BXRS_HEX_PARAM_FIELD(vmexec_ctrls, eoi_exit_bitmap1, BX_CPU_THIS_PTR vmcs.eoi_exit_bitmap[1]);
  BXRS_HEX_PARAM_FIELD(vmexec_ctrls, eoi_exit_bitmap2, BX_CPU_THIS_PTR vmcs.eoi_exit_bitmap[2]);
  BXRS_HEX_PARAM_FIELD(vmexec_ctrls, eoi_exit_bitmap3, BX_CPU_THIS_PTR vmcs.eoi_exit_bitmap[3]);
  BXRS_HEX_PARAM_FIELD(vmexec_ctrls, eoi_exit_bitmap4, BX_CPU_THIS_PTR vmcs.eoi_exit_bitmap[4]);
  BXRS_HEX_PARAM_FIELD(vmexec_ctrls, eoi_exit_bitmap5, BX_CPU_THIS_PTR vmcs.eoi_exit_bitmap[5]);
  BXRS_HEX_PARAM_FIELD(vmexec_ctrls, eoi_exit_bitmap6, BX_CPU_THIS_PTR vmcs.eoi_exit_bitmap[6]);
  BXRS_HEX_PARAM_FIELD(vmexec_ctrls, eoi_exit_bitmap7, BX_CPU_THIS_PTR vmcs.eoi_exit_bitmap[7]);
#endif
#if BX_SUPPORT_VMX >= 2
  BXRS_HEX_PARAM_FIELD(vmexec_ctrls, vmread_bitmap_addr, BX_CPU_THIS_PTR vmcs.vmread_bitmap_addr);
  BXRS_HEX_PARAM_FIELD(vmexec_ctrls, vmwrite_bitmap_addr, BX_CPU_THIS_PTR vmcs.vmwrite_bitmap_addr);
#endif
#if BX_SUPPORT_VMX >= 2
  BXRS_HEX_PARAM_FIELD(vmexec_ctrls, ve_info_addr, BX_CPU_THIS_PTR vmcs.ve_info_addr);
  BXRS_HEX_PARAM_FIELD(vmexec_ctrls, eptp_index, BX_CPU_THIS_PTR vmcs.eptp_index);
#endif

  //
  // VM-Exit Control Fields
  //

  bx_list_c *vmexit_ctrls = new bx_list_c(vmcache, "VMEXIT_CTRLS");

  BXRS_HEX_PARAM_FIELD(vmexit_ctrls, vmexit_ctrls, BX_CPU_THIS_PTR vmcs.vmexit_ctrls);
  BXRS_DEC_PARAM_FIELD(vmexit_ctrls, vmexit_msr_store_cnt, BX_CPU_THIS_PTR vmcs.vmexit_msr_store_cnt);
  BXRS_HEX_PARAM_FIELD(vmexit_ctrls, vmexit_msr_store_addr, BX_CPU_THIS_PTR vmcs.vmexit_msr_store_addr);
  BXRS_DEC_PARAM_FIELD(vmexit_ctrls, vmexit_msr_load_cnt, BX_CPU_THIS_PTR vmcs.vmexit_msr_load_cnt);
  BXRS_HEX_PARAM_FIELD(vmexit_ctrls, vmexit_msr_load_addr, BX_CPU_THIS_PTR vmcs.vmexit_msr_load_addr);

  //
  // VM-Entry Control Fields
  //

  bx_list_c *vmentry_ctrls = new bx_list_c(vmcache, "VMENTRY_CTRLS");

  BXRS_HEX_PARAM_FIELD(vmentry_ctrls, vmentry_ctrls, BX_CPU_THIS_PTR vmcs.vmentry_ctrls);
  BXRS_DEC_PARAM_FIELD(vmentry_ctrls, vmentry_msr_load_cnt, BX_CPU_THIS_PTR vmcs.vmentry_msr_load_cnt);
  BXRS_HEX_PARAM_FIELD(vmentry_ctrls, vmentry_msr_load_addr, BX_CPU_THIS_PTR vmcs.vmentry_msr_load_addr);
  BXRS_HEX_PARAM_FIELD(vmentry_ctrls, vmentry_interr_info, BX_CPU_THIS_PTR vmcs.vmentry_interr_info);
  BXRS_HEX_PARAM_FIELD(vmentry_ctrls, vmentry_excep_err_code, BX_CPU_THIS_PTR vmcs.vmentry_excep_err_code);
  BXRS_HEX_PARAM_FIELD(vmentry_ctrls, vmentry_instr_length, BX_CPU_THIS_PTR vmcs.vmentry_instr_length);

  //
  // VMCS Host State
  //
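  // Only the host-state fields that are reloaded on VMexit are snapshotted
  // here: control registers, segment selectors and bases, GDTR/IDTR/TR,
  // RSP/RIP, the SYSENTER MSRs and, when VMXx2 is compiled in, PAT (and EFER
  // on x86-64 builds).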

  bx_list_c *host = new bx_list_c(vmcache, "HOST_STATE");

#undef NEED_CPU_REG_SHORTCUTS

  BXRS_HEX_PARAM_FIELD(host, CR0, BX_CPU_THIS_PTR vmcs.host_state.cr0);
  BXRS_HEX_PARAM_FIELD(host, CR3, BX_CPU_THIS_PTR vmcs.host_state.cr3);
  BXRS_HEX_PARAM_FIELD(host, CR4, BX_CPU_THIS_PTR vmcs.host_state.cr4);
  BXRS_HEX_PARAM_FIELD(host, ES, BX_CPU_THIS_PTR vmcs.host_state.segreg_selector[BX_SEG_REG_ES]);
  BXRS_HEX_PARAM_FIELD(host, CS, BX_CPU_THIS_PTR vmcs.host_state.segreg_selector[BX_SEG_REG_CS]);
  BXRS_HEX_PARAM_FIELD(host, SS, BX_CPU_THIS_PTR vmcs.host_state.segreg_selector[BX_SEG_REG_SS]);
  BXRS_HEX_PARAM_FIELD(host, DS, BX_CPU_THIS_PTR vmcs.host_state.segreg_selector[BX_SEG_REG_DS]);
  BXRS_HEX_PARAM_FIELD(host, FS, BX_CPU_THIS_PTR vmcs.host_state.segreg_selector[BX_SEG_REG_FS]);
  BXRS_HEX_PARAM_FIELD(host, FS_BASE, BX_CPU_THIS_PTR vmcs.host_state.fs_base);
  BXRS_HEX_PARAM_FIELD(host, GS, BX_CPU_THIS_PTR vmcs.host_state.segreg_selector[BX_SEG_REG_GS]);
  BXRS_HEX_PARAM_FIELD(host, GS_BASE, BX_CPU_THIS_PTR vmcs.host_state.gs_base);
  BXRS_HEX_PARAM_FIELD(host, GDTR_BASE, BX_CPU_THIS_PTR vmcs.host_state.gdtr_base);
  BXRS_HEX_PARAM_FIELD(host, IDTR_BASE, BX_CPU_THIS_PTR vmcs.host_state.idtr_base);
  BXRS_HEX_PARAM_FIELD(host, TR, BX_CPU_THIS_PTR vmcs.host_state.tr_selector);
  BXRS_HEX_PARAM_FIELD(host, TR_BASE, BX_CPU_THIS_PTR vmcs.host_state.tr_base);
  BXRS_HEX_PARAM_FIELD(host, RSP, BX_CPU_THIS_PTR vmcs.host_state.rsp);
  BXRS_HEX_PARAM_FIELD(host, RIP, BX_CPU_THIS_PTR vmcs.host_state.rip);
  BXRS_HEX_PARAM_FIELD(host, sysenter_esp_msr, BX_CPU_THIS_PTR vmcs.host_state.sysenter_esp_msr);
  BXRS_HEX_PARAM_FIELD(host, sysenter_eip_msr, BX_CPU_THIS_PTR vmcs.host_state.sysenter_eip_msr);
  BXRS_HEX_PARAM_FIELD(host, sysenter_cs_msr, BX_CPU_THIS_PTR vmcs.host_state.sysenter_cs_msr);
#if BX_SUPPORT_VMX >= 2
  BXRS_HEX_PARAM_FIELD(host, pat_msr, BX_CPU_THIS_PTR vmcs.host_state.pat_msr);
#if BX_SUPPORT_X86_64
  BXRS_HEX_PARAM_FIELD(host, efer_msr, BX_CPU_THIS_PTR vmcs.host_state.efer_msr);
#endif
#endif
}

#endif // BX_SUPPORT_VMX