EPT + VPID - VMXx2 support

This commit is contained in:
Stanislav Shwartsman 2010-04-07 17:12:17 +00:00
parent fa57efb9a8
commit df7db31fb4
9 changed files with 681 additions and 36 deletions

View File

@ -5,6 +5,8 @@ Brief summary :
through .bochsrc (Stanislav)
- Bugfixes for CPU emulation correctness and stability
- Implemented Intel VMXx2 extensions (Stanislav)
- Extended VMX capability MSRs, APIC Virtualization,
Extended Page Tables (EPT), VPID, new VMX controls support
- Implemented the PCLMULQDQ and AES instruction set extensions
- Extended Bochs internal debugger functionality
- USB HP DeskJet 920C printer device emulation (Ben Lunt)
@ -39,11 +41,13 @@ Detailed change log :
- Implemented Intel VMXx2 extensions:
- Enabled extended VMX capability MSRs
- Implemented VMX controls for loading/storing of MSR_PAT and MSR_EFER
- Enabled secondary proc-based vmexec controls:
- Enabled/Implemented secondary proc-based vmexec controls:
- Implemented APIC virtualization
- Implemented WBINVD VMEXIT control
- Implemented Extended Page Tables (EPT) mode
- Implemented Descriptor Table Access VMEXIT control
- Implemented RDTSCP VMEXIT control
- Implemented Virtual Process ID (VPID)
- Implemented WBINVD VMEXIT control
In order to enable emulation of VMXx2 extensions configure with
--enable-vmx=2 option (x86-64 must be enabled)
- Bugfixes for CPU emulation correctness

View File

@ -1,5 +1,5 @@
/////////////////////////////////////////////////////////////////////////
// $Id: cpu.h,v 1.665 2010-04-07 14:38:53 sshwarts Exp $
// $Id: cpu.h,v 1.666 2010-04-07 17:12:17 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
// Copyright (C) 2001-2010 The Bochs Project
@ -354,6 +354,7 @@ enum {
#define BX_MSR_VMX_CR4_FIXED1 0x489
#define BX_MSR_VMX_VMCS_ENUM 0x48a
#define BX_MSR_VMX_PROCBASED_CTRLS2 0x48b
#define BX_MSR_VMX_MSR_VMX_EPT_VPID_CAP 0x48c
#define BX_MSR_VMX_TRUE_PINBASED_CTRLS 0x48d
#define BX_MSR_VMX_TRUE_PROCBASED_CTRLS 0x48e
#define BX_MSR_VMX_TRUE_VMEXIT_CTRLS 0x48f
@ -2321,6 +2322,11 @@ public: // for now...
BX_SMF void VMWRITE(bxInstruction_c *) BX_CPP_AttrRegparmN(1);
/* VMX instructions */
/* VMXx2 */
BX_SMF void INVEPT(bxInstruction_c *) BX_CPP_AttrRegparmN(1);
BX_SMF void INVVPID(bxInstruction_c *) BX_CPP_AttrRegparmN(1);
/* VMXx2 */
/*** Duplicate SSE instructions ***/
// Although in implementation, these instructions are aliased to the
// another function, it's nice to have them call a separate function when
@ -2800,6 +2806,9 @@ public: // for now...
#endif
#if BX_DEBUGGER || BX_DISASM || BX_INSTRUMENTATION || BX_GDBSTUB
BX_SMF bx_bool dbg_xlate_linear2phy(bx_address linear, bx_phy_address *phy);
#if BX_SUPPORT_VMX >= 2
BX_SMF bx_bool dbg_translate_guest_physical(bx_phy_address guest_paddr, bx_phy_address *phy);
#endif
#endif
BX_SMF void atexit(void);
@ -3099,6 +3108,9 @@ public: // for now...
#endif
#if BX_SUPPORT_X86_64
BX_SMF bx_phy_address translate_linear_long_mode(bx_address laddr, Bit32u &lpf_mask, Bit32u &combined_access, unsigned curr_pl, unsigned rw);
#endif
#if BX_SUPPORT_VMX >= 2
BX_SMF bx_phy_address translate_guest_physical(bx_phy_address guest_paddr, bx_address guest_laddr, bx_bool guest_laddr_valid, bx_bool is_page_walk, unsigned rw);
#endif
BX_SMF BX_CPP_INLINE bx_phy_address dtranslate_linear(bx_address laddr, unsigned curr_pl, unsigned rw)
{
@ -3136,7 +3148,10 @@ public: // for now...
BX_SMF bx_bool check_CR4(bx_address val) BX_CPP_AttrRegparmN(1);
#endif
#if BX_CPU_LEVEL >= 6
BX_SMF bx_bool CheckPDPTR(Bit32u cr3_val) BX_CPP_AttrRegparmN(1);
BX_SMF bx_bool CheckPDPTR(bx_phy_address cr3_val) BX_CPP_AttrRegparmN(1);
#endif
#if BX_SUPPORT_VMX >= 2
BX_SMF bx_bool CheckPDPTR(Bit64u *pdptr) BX_CPP_AttrRegparmN(1);
#endif
BX_SMF void reset(unsigned source);
@ -3389,6 +3404,7 @@ public: // for now...
BX_SMF bx_bool is_virtual_apic_page(bx_phy_address paddr) BX_CPP_AttrRegparmN(1);
BX_SMF void VMX_Virtual_Apic_Read(bx_phy_address paddr, unsigned len, void *data);
BX_SMF void VMX_Virtual_Apic_Write(bx_phy_address paddr, unsigned len, void *data);
BX_SMF Bit16u VMX_Get_Current_VPID(void);
#endif
BX_SMF Bit32u VMX_Read_VTPR(void);
BX_SMF void VMX_Write_TPR_Shadow(Bit8u tpr_shadow);

View File

@ -1,5 +1,5 @@
/////////////////////////////////////////////////////////////////////////
// $Id: fetchdecode_sse.h,v 1.10 2010-04-02 19:03:47 sshwarts Exp $
// $Id: fetchdecode_sse.h,v 1.11 2010-04-07 17:12:17 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2005-2010 Stanislav Shwartsman
@ -1082,8 +1082,8 @@ static const BxOpcodeInfo_t BxOpcode3ByteTable0f38[256] = {
/* 7D */ { 0, BX_IA_ERROR },
/* 7E */ { 0, BX_IA_ERROR },
/* 7F */ { 0, BX_IA_ERROR },
/* 80 */ { 0, BX_IA_ERROR },
/* 81 */ { 0, BX_IA_ERROR },
/* 80 */ { BxPrefixSSE66 | BxTraceEnd, BX_IA_INVEPT },
/* 81 */ { BxPrefixSSE66 | BxTraceEnd, BX_IA_INVVPID },
/* 82 */ { 0, BX_IA_ERROR },
/* 83 */ { 0, BX_IA_ERROR },
/* 84 */ { 0, BX_IA_ERROR },

View File

@ -1,5 +1,5 @@
/////////////////////////////////////////////////////////////////////////
// $Id: ia_opcodes.h,v 1.44 2010-04-04 19:56:55 sshwarts Exp $
// $Id: ia_opcodes.h,v 1.45 2010-04-07 17:12:17 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2008-2010 Stanislav Shwartsman
@ -1621,4 +1621,6 @@ bx_define_opcode(BX_IA_VMWRITE_GdEd, &BX_CPU_C::VMWRITE, NULL, BX_CPU_VMX)
bx_define_opcode(BX_IA_VMREAD_EqGq, &BX_CPU_C::VMREAD, NULL, BX_CPU_X86_64 | BX_CPU_VMX)
bx_define_opcode(BX_IA_VMWRITE_GqEq, &BX_CPU_C::VMWRITE, NULL, BX_CPU_X86_64 | BX_CPU_VMX)
#endif
bx_define_opcode(BX_IA_INVEPT, &BX_CPU_C::INVEPT, NULL, BX_CPU_X86_64 | BX_CPU_VMX)
bx_define_opcode(BX_IA_INVVPID, &BX_CPU_C::INVVPID, NULL, BX_CPU_X86_64 | BX_CPU_VMX)
// VMX

View File

@ -1,5 +1,5 @@
/////////////////////////////////////////////////////////////////////////
// $Id: msr.cc,v 1.46 2010-04-04 19:56:55 sshwarts Exp $
// $Id: msr.cc,v 1.47 2010-04-07 17:12:17 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2008-2010 Stanislav Shwartsman
@ -177,6 +177,9 @@ bx_bool BX_CPP_AttrRegparmN(2) BX_CPU_C::rdmsr(Bit32u index, Bit64u *msr)
case BX_MSR_VMX_TRUE_VMENTRY_CTRLS:
val64 = VMX_MSR_VMX_TRUE_VMENTRY_CTRLS;
break;
case BX_MSR_VMX_MSR_VMX_EPT_VPID_CAP:
val64 = VMX_MSR_VMX_EPT_VPID_CAP;
break;
#endif
case BX_MSR_VMX_MISC:
val64 = VMX_MSR_MISC;
@ -279,7 +282,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::RDMSR(bxInstruction_c *i)
}
#if BX_CPU_LEVEL >= 6
BX_CPP_INLINE bx_bool isMemTypeValidMTRR(unsigned memtype)
bx_bool isMemTypeValidMTRR(unsigned memtype)
{
switch(memtype) {
case BX_MEMTYPE_UC:
@ -519,6 +522,7 @@ bx_bool BX_CPP_AttrRegparmN(2) BX_CPU_C::wrmsr(Bit32u index, Bit64u val_64)
case BX_MSR_VMX_CR4_FIXED0:
case BX_MSR_VMX_CR4_FIXED1:
case BX_MSR_VMX_VMCS_ENUM:
case BX_MSR_VMX_MSR_VMX_EPT_VPID_CAP:
case BX_MSR_VMX_TRUE_PINBASED_CTRLS:
case BX_MSR_VMX_TRUE_PROCBASED_CTRLS:
case BX_MSR_VMX_TRUE_VMEXIT_CTRLS:

View File

@ -1,5 +1,5 @@
/////////////////////////////////////////////////////////////////////////
// $Id: paging.cc,v 1.215 2010-04-07 14:38:53 sshwarts Exp $
// $Id: paging.cc,v 1.216 2010-04-07 17:12:17 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
// Copyright (C) 2001-2010 The Bochs Project
@ -696,6 +696,12 @@ bx_phy_address BX_CPU_C::translate_linear_long_mode(bx_address laddr, Bit32u &lp
for (leaf = BX_LEVEL_PML4;; --leaf) {
entry_addr[leaf] = ppf + ((laddr >> (9 + 9*leaf)) & 0xff8);
#if BX_SUPPORT_VMX >= 2
if (BX_CPU_THIS_PTR in_vmx_guest) {
if (SECONDARY_VMEXEC_CONTROL(VMX_VM_EXEC_CTRL3_EPT_ENABLE))
entry_addr[leaf] = translate_guest_physical(entry_addr[leaf], laddr, 1, 1, rw);
}
#endif
access_read_physical(entry_addr[leaf], 8, &entry[leaf]);
BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID, entry_addr[leaf], 8, BX_READ, (Bit8u*)(&entry[leaf]));
@ -789,9 +795,15 @@ bx_phy_address BX_CPU_C::translate_linear_long_mode(bx_address laddr, Bit32u &lp
#define PAGING_PAE_PDPTE_RESERVED_BITS \
(BX_PAGING_PHY_ADDRESS_RESERVED_BITS | BX_CONST64(0xFFF00000000001E6))
bx_bool BX_CPP_AttrRegparmN(1) BX_CPU_C::CheckPDPTR(Bit32u cr3_val)
bx_bool BX_CPP_AttrRegparmN(1) BX_CPU_C::CheckPDPTR(bx_phy_address cr3_val)
{
cr3_val &= 0xffffffe0;
#if BX_SUPPORT_VMX >= 2
if (BX_CPU_THIS_PTR in_vmx_guest) {
if (SECONDARY_VMEXEC_CONTROL(VMX_VM_EXEC_CTRL3_EPT_ENABLE))
cr3_val = translate_guest_physical(cr3_val, 0, 0, 0, BX_READ);
}
#endif
Bit64u pdptr[4];
int n;
@ -815,6 +827,19 @@ bx_bool BX_CPP_AttrRegparmN(1) BX_CPU_C::CheckPDPTR(Bit32u cr3_val)
return 1; /* PDPTRs are fine */
}
#if BX_SUPPORT_VMX >= 2
// Validate an array of four PAE PDPTEs (e.g. the PDPTE0..PDPTE3 fields
// loaded from the VMCS on VM entry with EPT enabled).  A present entry
// (bit 0 set) must have all reserved bits clear.
// Returns 1 when all four entries are acceptable, 0 otherwise.
bx_bool BX_CPP_AttrRegparmN(1) BX_CPU_C::CheckPDPTR(Bit64u *pdptr)
{
  for (int entry = 0; entry < 4; entry++) {
    // only present entries are checked; reserved bits in a not-present
    // entry are ignored
    if ((pdptr[entry] & 0x1) && (pdptr[entry] & PAGING_PAE_PDPTE_RESERVED_BITS))
      return 0;
  }

  return 1; /* PDPTRs are fine */
}
#endif
// Translate a linear address to a physical address in PAE paging mode
bx_phy_address BX_CPU_C::translate_linear_PAE(bx_address laddr, Bit32u &lpf_mask, Bit32u &combined_access, unsigned curr_pl, unsigned rw)
{
@ -846,7 +871,12 @@ bx_phy_address BX_CPU_C::translate_linear_PAE(bx_address laddr, Bit32u &lpf_mask
entry_addr[BX_LEVEL_PDE] = (bx_phy_address)((entry[BX_LEVEL_PDPE] & BX_CONST64(0x000ffffffffff000))
| ((laddr & 0x3fe00000) >> 18));
#if BX_SUPPORT_VMX >= 2
if (BX_CPU_THIS_PTR in_vmx_guest) {
if (SECONDARY_VMEXEC_CONTROL(VMX_VM_EXEC_CTRL3_EPT_ENABLE))
entry_addr[BX_LEVEL_PDE] = translate_guest_physical(entry_addr[BX_LEVEL_PDE], laddr, 1, 1, rw);
}
#endif
access_read_physical(entry_addr[BX_LEVEL_PDE], 8, &entry[BX_LEVEL_PDE]);
BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID, entry_addr[BX_LEVEL_PDE], 8, BX_READ, (Bit8u*)(&entry[BX_LEVEL_PDE]));
@ -871,7 +901,12 @@ bx_phy_address BX_CPU_C::translate_linear_PAE(bx_address laddr, Bit32u &lpf_mask
// 4k pages, Get page table entry.
entry_addr[BX_LEVEL_PTE] = (bx_phy_address)((entry[BX_LEVEL_PDE] & BX_CONST64(0x000ffffffffff000)) |
((laddr & 0x001ff000) >> 9));
#if BX_SUPPORT_VMX >= 2
if (BX_CPU_THIS_PTR in_vmx_guest) {
if (SECONDARY_VMEXEC_CONTROL(VMX_VM_EXEC_CTRL3_EPT_ENABLE))
entry_addr[BX_LEVEL_PTE] = translate_guest_physical(entry_addr[BX_LEVEL_PTE], laddr, 1, 1, rw);
}
#endif
access_read_physical(entry_addr[BX_LEVEL_PTE], 8, &entry[BX_LEVEL_PTE]);
BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID, entry_addr[BX_LEVEL_PTE], 8, BX_READ, (Bit8u*)(&entry[BX_LEVEL_PTE]));
@ -992,6 +1027,12 @@ bx_phy_address BX_CPU_C::translate_linear(bx_address laddr, unsigned curr_pl, un
Bit32u pde, pte, cr3_masked = BX_CPU_THIS_PTR cr3 & BX_CR3_PAGING_MASK;
bx_phy_address pde_addr = (bx_phy_address) (cr3_masked | ((laddr & 0xffc00000) >> 20));
#if BX_SUPPORT_VMX >= 2
if (BX_CPU_THIS_PTR in_vmx_guest) {
if (SECONDARY_VMEXEC_CONTROL(VMX_VM_EXEC_CTRL3_EPT_ENABLE))
pde_addr = translate_guest_physical(pde_addr, laddr, 1, 1, rw);
}
#endif
access_read_physical(pde_addr, 4, &pde);
BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID, pde_addr, 4, BX_READ, (Bit8u*)(&pde));
@ -1043,7 +1084,12 @@ bx_phy_address BX_CPU_C::translate_linear(bx_address laddr, unsigned curr_pl, un
{
// Get page table entry
bx_phy_address pte_addr = (bx_phy_address)((pde & 0xfffff000) | ((laddr & 0x003ff000) >> 10));
#if BX_SUPPORT_VMX >= 2
if (BX_CPU_THIS_PTR in_vmx_guest) {
if (SECONDARY_VMEXEC_CONTROL(VMX_VM_EXEC_CTRL3_EPT_ENABLE))
pte_addr = translate_guest_physical(pte_addr, laddr, 1, 1, rw);
}
#endif
access_read_physical(pte_addr, 4, &pte);
BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID, pte_addr, 4, BX_READ, (Bit8u*)(&pte));
@ -1105,6 +1151,14 @@ bx_phy_address BX_CPU_C::translate_linear(bx_address laddr, unsigned curr_pl, un
ppf = (bx_phy_address) lpf;
}
#if BX_SUPPORT_VMX >= 2
if (BX_CPU_THIS_PTR in_vmx_guest) {
if (SECONDARY_VMEXEC_CONTROL(VMX_VM_EXEC_CTRL3_EPT_ENABLE)) {
ppf = translate_guest_physical(ppf, laddr, 1, 0, rw);
}
}
#endif
// Calculate physical memory address and fill in TLB cache entry
paddress = ppf | poffset;
@ -1154,11 +1208,193 @@ bx_phy_address BX_CPU_C::translate_linear(bx_address laddr, unsigned curr_pl, un
return paddress;
}
#if BX_SUPPORT_VMX >= 2
/* EPT access type */
#define BX_EPT_READ 0x01
#define BX_EPT_WRITE 0x02
#define BX_EPT_EXECUTE 0x04
/* EPT access mask */
#define BX_EPT_ENTRY_NOT_PRESENT 0x00
#define BX_EPT_ENTRY_READ_ONLY 0x01
#define BX_EPT_ENTRY_WRITE_ONLY 0x02
#define BX_EPT_ENTRY_READ_WRITE 0x03
#define BX_EPT_ENTRY_EXECUTE_ONLY 0x04
#define BX_EPT_ENTRY_READ_EXECUTE 0x05
#define BX_EPT_ENTRY_WRITE_EXECUTE 0x06
#define BX_EPT_ENTRY_READ_WRITE_EXECUTE 0x07
#define PAGING_EPT_RESERVED_BITS (BX_PAGING_PHY_ADDRESS_RESERVED_BITS)
// Walk the EPT paging structures and translate a guest-physical address
// into a host-physical address.
//
//   guest_paddr       - guest-physical address to translate
//   guest_laddr       - guest-linear address responsible for the access;
//                       reported in the VMCS on an EPT violation
//   guest_laddr_valid - guest_laddr is meaningful for this access
//   is_page_walk      - the access is part of a guest page-table walk
//                       (sets bit 8 of the exit qualification)
//   rw                - access type (BX_READ / BX_WRITE / BX_RW / BX_EXECUTE)
//
// On an EPT violation or EPT misconfiguration this function does not
// return to the caller: VMexit() longjmp's back to the main loop.
bx_phy_address BX_CPU_C::translate_guest_physical(bx_phy_address guest_paddr, bx_address guest_laddr, bx_bool guest_laddr_valid, bx_bool is_page_walk, unsigned rw)
{
  VMCS_CACHE *vm = &BX_CPU_THIS_PTR vmcs;
  bx_phy_address entry_addr[4], ppf = 0, pbase = LPFOf(vm->eptptr);
  Bit64u entry[4];
  int leaf = BX_LEVEL_PTE;
  Bit32u combined_access = 0x7, access_mask = 0;

  BX_DEBUG(("EPT walk for guest paddr 0x" FMT_ADDRX, guest_paddr));

  // translate the requested access into an EPT R/W/X permission mask
  if (rw == BX_EXECUTE) access_mask |= BX_EPT_EXECUTE;
  if (rw & 1) access_mask |= BX_EPT_WRITE; // write or r-m-w
  if (rw == BX_READ) access_mask |= BX_EPT_READ;

  Bit32u vmexit_reason = 0, vmexit_qualification = access_mask;

  // walk from the EPT PML4 level down toward the PTE level
  for (leaf = BX_LEVEL_PML4;; --leaf) {
    entry_addr[leaf] = pbase + ((guest_paddr >> (9 + 9*leaf)) & 0xff8);
    access_read_physical(entry_addr[leaf], 8, &entry[leaf]);
    BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID, entry_addr[leaf], 8, BX_READ, (Bit8u*)(&entry[leaf]));

    Bit64u curr_entry = entry[leaf];
    Bit32u curr_access_mask = curr_entry & 0x7;

    // permissions combine (AND) across all levels of the walk
    combined_access &= curr_access_mask;

    if (curr_access_mask == BX_EPT_ENTRY_NOT_PRESENT) {
      BX_DEBUG(("EPT %s: not present", bx_paging_level[leaf]));
      vmexit_reason = VMX_VMEXIT_EPT_VIOLATION;
      break;
    }

    // write-only and write+execute combinations are architecturally
    // invalid EPT entries -> EPT misconfiguration
    if (curr_access_mask == BX_EPT_ENTRY_WRITE_ONLY || curr_access_mask == BX_EPT_ENTRY_WRITE_EXECUTE) {
      BX_DEBUG(("EPT %s: EPT misconfiguration mask=%d",
        bx_paging_level[leaf], curr_access_mask));
      vmexit_reason = VMX_VMEXIT_EPT_MISCONFIGURATION;
      break;
    }

    extern bx_bool isMemTypeValidMTRR(unsigned memtype);
    if (! isMemTypeValidMTRR((curr_entry >> 3) & 7)) {
      BX_DEBUG(("EPT %s: EPT misconfiguration memtype=%d", bx_paging_level[leaf], (curr_entry >> 3) & 7));
      vmexit_reason = VMX_VMEXIT_EPT_MISCONFIGURATION;
      break;
    }

    if (curr_entry & PAGING_EPT_RESERVED_BITS) {
      BX_DEBUG(("EPT %s: reserved bit is set %08x:%08x",
        bx_paging_level[leaf], GET32H(curr_entry), GET32L(curr_entry)));
      vmexit_reason = VMX_VMEXIT_EPT_MISCONFIGURATION;
      break;
    }

    pbase = curr_entry & BX_CONST64(0x000ffffffffff000);

    if (leaf == BX_LEVEL_PTE) {
      // Make up the physical page frame address.
      ppf = (bx_phy_address)(curr_entry & BX_CONST64(0x000ffffffffff000));
      break;
    }

    if (curr_entry & 0x80) {  // PS bit: large page mapping
      // bugfix: the original BX_DEBUG had a %s conversion with no argument
      if (leaf > (BX_LEVEL_PDE + BX_SUPPORT_1G_PAGES)) {
        BX_DEBUG(("EPT %s: PS bit set !", bx_paging_level[leaf]));
        vmexit_reason = VMX_VMEXIT_EPT_VIOLATION;
        break;
      }

      if (leaf == BX_LEVEL_PDPE) {
        if (curr_entry & PAGING_PAE_PDPTE1G_RESERVED_BITS) {
           BX_DEBUG(("EPT PDPE1G: reserved bit is set: PDPE=%08x:%08x", GET32H(curr_entry), GET32L(curr_entry)));
           vmexit_reason = VMX_VMEXIT_EPT_VIOLATION;
           break;
        }

        // Make up the physical page frame address (1G page).
        ppf = (bx_phy_address)((curr_entry & BX_CONST64(0x000fffffc0000000)) | (guest_paddr & 0x3ffff000));
        break;
      }

      if (leaf == BX_LEVEL_PDE) {
        if (curr_entry & PAGING_PAE_PDE2M_RESERVED_BITS) {
          BX_DEBUG(("EPT PDE2M: reserved bit is set PDE=%08x:%08x", GET32H(curr_entry), GET32L(curr_entry)));
          vmexit_reason = VMX_VMEXIT_EPT_VIOLATION;
          break;
        }

        // Make up the physical page frame address (2M page).
        ppf = (bx_phy_address)((curr_entry & BX_CONST64(0x000fffffffe00000)) | (guest_paddr & 0x001ff000));
        break;
      }
    }
  }

  // the requested access rights must be a subset of the rights combined
  // over the whole walk; note this check (deliberately, as in the
  // original) also runs after a misconfiguration break
  if ((access_mask & combined_access) != access_mask) {
    vmexit_reason = VMX_VMEXIT_EPT_VIOLATION;
  }

  if (vmexit_reason) {
    BX_ERROR(("VMEXIT: EPT %s for guest paddr 0x" FMT_ADDRX " laddr " FMT_ADDRX,
       (vmexit_reason == VMX_VMEXIT_EPT_VIOLATION) ? "violation" : "misconfig", guest_paddr, guest_laddr));
    VMwrite64(VMCS_64BIT_GUEST_PHYSICAL_ADDR, guest_paddr);

    if (guest_laddr_valid) {
      VMwrite64(VMCS_GUEST_LINEAR_ADDR, guest_laddr);
      vmexit_qualification |= 0x80;             // linear address field valid
      if (is_page_walk) vmexit_qualification |= 0x100;  // access during page walk
    }

    VMexit(0, vmexit_reason, vmexit_qualification | (combined_access << 3));
  }

  Bit32u page_offset = PAGE_OFFSET(guest_paddr);
  return ppf | page_offset;
}
#endif
#if BX_DEBUGGER || BX_DISASM || BX_INSTRUMENTATION || BX_GDBSTUB
#if BX_SUPPORT_VMX >= 2
// Debugger-only EPT walk: translate a guest-physical address to a
// host-physical address without side effects (no VMEXIT, no exceptions).
// Returns 1 and stores the result in *phy on success, 0 on any failure
// (not-present entry, invalid permission combination, reserved bits,
// or an unexpected PS bit).
bx_bool BX_CPU_C::dbg_translate_guest_physical(bx_phy_address guest_paddr, bx_phy_address *phy)
{
  VMCS_CACHE *vm = &BX_CPU_THIS_PTR vmcs;
  bx_phy_address pt_address = LPFOf(vm->eptptr);
  bx_phy_address offset_mask = 0xfff;  // 4K page offset by default

  // walk the 4-level EPT structure from PML4 (level 3) down to PTE (level 0)
  for (int level = 3; level >= 0; --level) {
    Bit64u pte;
    pt_address += ((guest_paddr >> (9 + 9*level)) & 0xff8);
    BX_MEM(0)->readPhysicalPage(BX_CPU_THIS, pt_address, 8, &pte);
    // write-only and write+execute are invalid EPT permission encodings;
    // not-present terminates the walk as a failure
    switch(pte & 7) {
       case BX_EPT_ENTRY_NOT_PRESENT:
       case BX_EPT_ENTRY_WRITE_ONLY:
       case BX_EPT_ENTRY_WRITE_EXECUTE:
         return 0;
    }
    if (pte & BX_PAGING_PHY_ADDRESS_RESERVED_BITS)
      return 0;

    pt_address = bx_phy_address(pte & BX_CONST64(0x000ffffffffff000));

    if (pte & 0x80) {  // PS bit: large-page mapping
      if (level == BX_LEVEL_PDE) { // 2M page
        offset_mask = 0x1fffff;
        // NOTE(review): mask keeps bits >= 13 of the base, then the
        // alignment check below rejects entries with non-zero low bits
        // (reserved for a 2M mapping) -- bit 12 is silently cleared
        // rather than rejected; confirm against the EPT spec
        pt_address &= BX_CONST64(0x000fffffffffe000);
        if (pt_address & offset_mask) return 0;
        break;
      }
#if BX_SUPPORT_1G_PAGES
      if (level == BX_LEVEL_PDPE) { // 1G page
        offset_mask = 0x3fffffff;
        pt_address &= BX_CONST64(0x000fffffffffe000);
        if (pt_address & offset_mask) return 0;
        break;
      }
#endif
      // PS bit is only legal at the PDE/PDPE levels
      if (level != BX_LEVEL_PTE) return 0;
    }
  }

  // combine final page frame with the in-page offset of the request
  *phy = pt_address + (bx_phy_address)(guest_paddr & offset_mask);
  return 1;
}
#endif
bx_bool BX_CPU_C::dbg_xlate_linear2phy(bx_address laddr, bx_phy_address *phy)
{
if (BX_CPU_THIS_PTR cr0.get_PG() == 0) {
if (! BX_CPU_THIS_PTR cr0.get_PG()) {
*phy = (bx_phy_address) laddr;
return 1;
}
@ -1184,6 +1420,14 @@ bx_bool BX_CPU_C::dbg_xlate_linear2phy(bx_address laddr, bx_phy_address *phy)
for (int level = 2 + long_mode(); level >= 0; --level) {
Bit64u pte;
pt_address += ((laddr >> (9 + 9*level)) & 0xff8);
#if BX_SUPPORT_VMX >= 2
if (BX_CPU_THIS_PTR in_vmx_guest) {
if (SECONDARY_VMEXEC_CONTROL(VMX_VM_EXEC_CTRL3_EPT_ENABLE)) {
if (! dbg_translate_guest_physical(pt_address, &pt_address))
goto page_fault;
}
}
#endif
BX_MEM(0)->readPhysicalPage(BX_CPU_THIS, pt_address, 8, &pte);
if(!(pte & 1))
goto page_fault;
@ -1219,6 +1463,14 @@ bx_bool BX_CPU_C::dbg_xlate_linear2phy(bx_address laddr, bx_phy_address *phy)
for (int level = 1; level >= 0; --level) {
Bit32u pte;
pt_address += ((laddr >> (10 + 10*level)) & 0xffc);
#if BX_SUPPORT_VMX >= 2
if (BX_CPU_THIS_PTR in_vmx_guest) {
if (SECONDARY_VMEXEC_CONTROL(VMX_VM_EXEC_CTRL3_EPT_ENABLE)) {
if (! dbg_translate_guest_physical(pt_address, &pt_address))
goto page_fault;
}
}
#endif
BX_MEM(0)->readPhysicalPage(BX_CPU_THIS, pt_address, 4, &pte);
if (!(pte & 1))
goto page_fault;
@ -1234,6 +1486,15 @@ bx_bool BX_CPU_C::dbg_xlate_linear2phy(bx_address laddr, bx_phy_address *phy)
paddress = pt_address + (bx_phy_address)(laddr & offset_mask);
}
#if BX_SUPPORT_VMX >= 2
if (BX_CPU_THIS_PTR in_vmx_guest) {
if (SECONDARY_VMEXEC_CONTROL(VMX_VM_EXEC_CTRL3_EPT_ENABLE)) {
if (! dbg_translate_guest_physical(paddress, &paddress))
goto page_fault;
}
}
#endif
*phy = paddress;
return 1;

View File

@ -1,5 +1,5 @@
/////////////////////////////////////////////////////////////////////////
// $Id: vmexit.cc,v 1.23 2010-04-04 19:23:47 sshwarts Exp $
// $Id: vmexit.cc,v 1.24 2010-04-07 17:12:17 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2009-2010 Stanislav Shwartsman
@ -45,6 +45,8 @@ Bit32u gen_instruction_info(bxInstruction_c *i, Bit32u reason)
#if BX_SUPPORT_VMX >= 2
case VMX_VMEXIT_GDTR_IDTR_ACCESS:
case VMX_VMEXIT_LDTR_TR_ACCESS:
case VMX_VMEXIT_INVEPT:
case VMX_VMEXIT_INVVPID:
#endif
instr_info |= i->nnn() << 28;
break;
@ -117,6 +119,8 @@ void BX_CPP_AttrRegparmN(2) BX_CPU_C::VMexit_Instruction(bxInstruction_c *i, Bit
#if BX_SUPPORT_VMX >= 2
case VMX_VMEXIT_GDTR_IDTR_ACCESS:
case VMX_VMEXIT_LDTR_TR_ACCESS:
case VMX_VMEXIT_INVEPT:
case VMX_VMEXIT_INVVPID:
#endif
qualification = (Bit64u) ((bx_address) i->displ32s());
#if BX_SUPPORT_X86_64
@ -764,6 +768,14 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::VMexit_WBINVD(bxInstruction_c *i)
VMexit(i, VMX_VMEXIT_WBINVD, 0);
}
}
// Return the VPID tagging translations for the current context: the
// guest's VMCS VPID when executing as a VMX guest with the VPID
// secondary execution control enabled, otherwise 0.
Bit16u BX_CPU_C::VMX_Get_Current_VPID(void)
{
  Bit16u vpid = 0;

  if (BX_CPU_THIS_PTR in_vmx_guest && SECONDARY_VMEXEC_CONTROL(VMX_VM_EXEC_CTRL3_VPID_ENABLE))
    vpid = BX_CPU_THIS_PTR vmcs.vpid;

  return vpid;
}
#endif
#endif // BX_SUPPORT_VMX

View File

@ -1,5 +1,5 @@
/////////////////////////////////////////////////////////////////////////
// $Id: vmx.cc,v 1.58 2010-04-04 09:04:12 sshwarts Exp $
// $Id: vmx.cc,v 1.59 2010-04-07 17:12:17 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2009-2010 Stanislav Shwartsman
@ -266,6 +266,27 @@ unsigned BX_CPU_C::VMXReadRevisionID(bx_phy_address pAddr)
return revision;
}
#if BX_SUPPORT_VMX >= 2
// Validate an EPTP (extended-page-table pointer) value:
//   [2:0]  EPT paging-structure memory type - only UC (0) or WB (6) allowed
//   [5:3]  page-walk length minus one       - must encode a 4-level walk (3)
//   [11:6] reserved                         - must be zero
// and the address portion must be a valid physical address.
BX_CPP_INLINE bx_bool is_eptptr_valid(Bit64u eptptr)
{
  unsigned memtype = (unsigned)(eptptr & 7);
  switch (memtype) {
    case BX_MEMTYPE_UC:
    case BX_MEMTYPE_WB:
      break;
    default:
      return 0;
  }

  unsigned walk_length = (unsigned)((eptptr >> 3) & 7);
  if (walk_length != 3) return 0;

#define BX_EPTPTR_RESERVED_BITS 0xfc0 /* bits 11:6 are reserved */
  if (eptptr & BX_EPTPTR_RESERVED_BITS) return 0;

  if (! IsValidPhyAddr(eptptr)) return 0;
  return 1;
}
#endif
////////////////////////////////////////////////////////////
// VMenter
////////////////////////////////////////////////////////////
@ -333,7 +354,6 @@ VMX_error_code BX_CPU_C::VMenterLoadCheckVmControls(void)
BX_ERROR(("VMFAIL: VMCS EXEC CTRL: VMX pin-based controls allowed 0-settings"));
return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
}
if (vm->vmexec_ctrls1 & ~VMX_CHECKS_USE_MSR_VMX_PINBASED_CTRLS_HI) {
BX_ERROR(("VMFAIL: VMCS EXEC CTRL: VMX pin-based controls allowed 1-settings"));
return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
@ -343,7 +363,6 @@ VMX_error_code BX_CPU_C::VMenterLoadCheckVmControls(void)
BX_ERROR(("VMFAIL: VMCS EXEC CTRL: VMX proc-based controls allowed 0-settings"));
return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
}
if (vm->vmexec_ctrls2 & ~VMX_CHECKS_USE_MSR_VMX_PROCBASED_CTRLS_HI) {
BX_ERROR(("VMFAIL: VMCS EXEC CTRL: VMX proc-based controls allowed 1-settings"));
return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
@ -354,7 +373,6 @@ VMX_error_code BX_CPU_C::VMenterLoadCheckVmControls(void)
BX_ERROR(("VMFAIL: VMCS EXEC CTRL: VMX secondary proc-based controls allowed 0-settings"));
return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
}
if (vm->vmexec_ctrls3 & ~VMX_MSR_VMX_PROCBASED_CTRLS2_HI) {
BX_ERROR(("VMFAIL: VMCS EXEC CTRL: VMX secondary proc-based controls allowed 1-settings"));
return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
@ -431,6 +449,24 @@ VMX_error_code BX_CPU_C::VMenterLoadCheckVmControls(void)
}
#endif
#if BX_SUPPORT_VMX >= 2
if (vm->vmexec_ctrls3 & VMX_VM_EXEC_CTRL3_EPT_ENABLE) {
vm->eptptr = (bx_phy_address) VMread64(VMCS_64BIT_CONTROL_EPTPTR);
if (! is_eptptr_valid(vm->eptptr)) {
BX_ERROR(("VMFAIL: VMCS EXEC CTRL: invalid EPTPTR value"));
return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
}
}
if (vm->vmexec_ctrls3 & VMX_VM_EXEC_CTRL3_VPID_ENABLE) {
vm->vpid = VMread16(VMCS_16BIT_CONTROL_VPID);
if (vm->vpid != 0) {
BX_ERROR(("VMFAIL: VMCS EXEC CTRL: guest VPID != 0"));
return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
}
}
#endif
//
// Load VM-exit control fields to VMCS Cache
//
@ -447,7 +483,6 @@ VMX_error_code BX_CPU_C::VMenterLoadCheckVmControls(void)
BX_ERROR(("VMFAIL: VMCS EXEC CTRL: VMX vmexit controls allowed 0-settings"));
return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
}
if (vm->vmexit_ctrls & ~VMX_CHECKS_USE_MSR_VMX_VMEXIT_CTRLS_HI) {
BX_ERROR(("VMFAIL: VMCS EXEC CTRL: VMX vmexit controls allowed 1-settings"));
return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
@ -496,7 +531,6 @@ VMX_error_code BX_CPU_C::VMenterLoadCheckVmControls(void)
BX_ERROR(("VMFAIL: VMCS EXEC CTRL: VMX vmentry controls allowed 0-settings"));
return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
}
if (vm->vmentry_ctrls & ~VMX_CHECKS_USE_MSR_VMX_VMENTRY_CTRLS_HI) {
BX_ERROR(("VMFAIL: VMCS EXEC CTRL: VMX vmentry controls allowed 1-settings"));
return VMXERR_VMENTRY_INVALID_VM_CONTROL_FIELD;
@ -808,6 +842,7 @@ BX_CPP_INLINE bx_bool IsLimitAccessRightsConsistent(Bit32u limit, Bit32u ar)
Bit32u BX_CPU_C::VMenterLoadCheckGuestState(Bit64u *qualification)
{
static const char *segname[] = { "ES", "CS", "SS", "DS", "FS", "GS" };
int n;
VMCS_GUEST_STATE guest;
VMCS_CACHE *vm = &BX_CPU_THIS_PTR vmcs;
@ -899,7 +934,7 @@ Bit32u BX_CPU_C::VMenterLoadCheckGuestState(Bit64u *qualification)
// Load and Check Guest State from VMCS - Segment Registers
//
for (int n=0; n<6; n++) {
for (n=0; n<6; n++) {
Bit16u selector = VMread16(VMCS_16BIT_GUEST_ES_SELECTOR + 2*n);
bx_address base = (bx_address) VMread64(VMCS_GUEST_ES_BASE + 2*n);
Bit32u limit = VMread32(VMCS_32BIT_GUEST_ES_LIMIT + 2*n);
@ -1327,10 +1362,25 @@ Bit32u BX_CPU_C::VMenterLoadCheckGuestState(Bit64u *qualification)
if (! x86_64_guest && (guest.cr4 & (1 << 5)) != 0 /* PAE */) {
// CR0.PG is always set in VMX mode
if (! CheckPDPTR(guest.cr3)) {
*qualification = VMENTER_ERR_GUEST_STATE_PDPTR_LOADING;
BX_ERROR(("VMENTER: Guest State PDPTRs Checks Failed"));
return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
#if BX_SUPPORT_VMX >= 2
if (vm->vmexec_ctrls3 & VMX_VM_EXEC_CTRL3_EPT_ENABLE) {
for (n=0;n<4;n++)
guest.pdptr[n] = VMread64(VMCS_64BIT_GUEST_IA32_PDPTE0 + 2*n);
if (! CheckPDPTR(guest.pdptr)) {
*qualification = VMENTER_ERR_GUEST_STATE_PDPTR_LOADING;
BX_ERROR(("VMENTER: Guest State PDPTRs Checks Failed"));
return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
}
}
else
#endif
{
if (! CheckPDPTR(guest.cr3)) {
*qualification = VMENTER_ERR_GUEST_STATE_PDPTR_LOADING;
BX_ERROR(("VMENTER: Guest State PDPTRs Checks Failed"));
return VMX_VMEXIT_VMENTRY_FAILURE_GUEST_STATE;
}
}
}
@ -1375,6 +1425,17 @@ Bit32u BX_CPU_C::VMenterLoadCheckGuestState(Bit64u *qualification)
// APIC ACCESS PAGE caching by host
TLB_flush();
#if BX_SUPPORT_VMX >= 2
if (vm->vmexec_ctrls3 & VMX_VM_EXEC_CTRL3_EPT_ENABLE) {
// load PDPTR only in PAE legacy mode
if (BX_CPU_THIS_PTR cr0.get_PG() && BX_CPU_THIS_PTR cr4.get_PAE() && !x86_64_guest) {
BX_CPU_THIS_PTR PDPTR_CACHE.valid = 1;
for (n = 0; n < 4; n++)
BX_CPU_THIS_PTR PDPTR_CACHE.entry[n] = guest.pdptr[n];
}
}
#endif
if (vmentry_ctrls & VMX_VMENTRY_CTRL1_LOAD_DBG_CTRLS) {
// always clear bits 15:14 and set bit 10
BX_CPU_THIS_PTR dr7 = (guest.dr7 & ~0xc000) | 0400;
@ -1606,17 +1667,34 @@ Bit32u BX_CPU_C::StoreMSRs(Bit32u msr_cnt, bx_phy_address pAddr)
void BX_CPU_C::VMexitSaveGuestState(void)
{
VMCS_CACHE *vm = &BX_CPU_THIS_PTR vmcs;
int n;
VMwrite64(VMCS_GUEST_CR0, BX_CPU_THIS_PTR cr0.get32());
VMwrite64(VMCS_GUEST_CR3, BX_CPU_THIS_PTR cr3);
VMwrite64(VMCS_GUEST_CR4, BX_CPU_THIS_PTR cr4.get32());
#if BX_SUPPORT_VMX >= 2
if (vm->vmexec_ctrls3 & VMX_VM_EXEC_CTRL3_EPT_ENABLE) {
// save only if guest running in legacy PAE mode
if (BX_CPU_THIS_PTR cr0.get_PG() && BX_CPU_THIS_PTR cr4.get_PAE() && !long_mode()) {
if (! BX_CPU_THIS_PTR PDPTR_CACHE.valid) {
if (! CheckPDPTR(BX_CPU_THIS_PTR cr3))
BX_PANIC(("VMEXIT: PDPTR cache is not valid !"));
}
for(n=0; n<4; n++)
VMwrite64(VMCS_64BIT_GUEST_IA32_PDPTE0 + 2*n, BX_CPU_THIS_PTR PDPTR_CACHE.entry[n]);
}
}
#endif
if (vm->vmexit_ctrls & VMX_VMEXIT_CTRL1_SAVE_DBG_CTRLS)
VMwrite64(VMCS_GUEST_DR7, BX_CPU_THIS_PTR dr7);
VMwrite64(VMCS_GUEST_RIP, RIP);
VMwrite64(VMCS_GUEST_RSP, RSP);
VMwrite64(VMCS_GUEST_RFLAGS, BX_CPU_THIS_PTR read_eflags());
for (int n=0; n<6; n++) {
for (n=0; n<6; n++) {
Bit32u selector = BX_CPU_THIS_PTR sregs[n].selector.value;
bx_bool invalid = !BX_CPU_THIS_PTR sregs[n].cache.valid;
bx_address base = BX_CPU_THIS_PTR sregs[n].cache.u.segment.base;
@ -1929,6 +2007,8 @@ void BX_CPU_C::VMexit(bxInstruction_c *i, Bit32u reason, Bit64u qualification)
}
}
BX_CPU_THIS_PTR in_vmx_guest = 0;
//
// STEP 2: Load Host State
//
@ -1949,7 +2029,6 @@ void BX_CPU_C::VMexit(bxInstruction_c *i, Bit32u reason, Bit64u qualification)
//
BX_CPU_THIS_PTR disable_INIT = 1; // INIT is disabled in VMX root mode
BX_CPU_THIS_PTR in_vmx_guest = 0;
BX_CPU_THIS_PTR vmx_interrupt_window = 0;
longjmp(BX_CPU_THIS_PTR jmp_buf_env, 1); // go back to main decode loop
@ -2417,6 +2496,11 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::VMREAD(bxInstruction_c *i)
Bit64u field_64;
switch(encoding) {
/* VMCS 16-bit control fields */
/* binary 0000_00xx_xxxx_xxx0 */
case VMCS_16BIT_CONTROL_VPID:
// fall through
/* VMCS 16-bit guest-state fields */
/* binary 0000_10xx_xxxx_xxx0 */
case VMCS_16BIT_GUEST_ES_SELECTOR:
@ -2521,6 +2605,9 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::VMREAD(bxInstruction_c *i)
case VMCS_64BIT_CONTROL_TSC_OFFSET:
case VMCS_64BIT_CONTROL_VIRTUAL_APIC_PAGE_ADDR:
case VMCS_64BIT_CONTROL_APIC_ACCESS_ADDR:
#if BX_SUPPORT_VMX >= 2
case VMCS_64BIT_CONTROL_EPTPTR:
#endif
field_64 = VMread64(encoding);
break;
@ -2534,15 +2621,36 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::VMREAD(bxInstruction_c *i)
case VMCS_64BIT_CONTROL_TSC_OFFSET_HI:
case VMCS_64BIT_CONTROL_VIRTUAL_APIC_PAGE_ADDR_HI:
case VMCS_64BIT_CONTROL_APIC_ACCESS_ADDR_HI:
#if BX_SUPPORT_VMX >= 2
case VMCS_64BIT_CONTROL_EPTPTR_HI:
#endif
field_64 = VMread32(encoding);
break;
#if BX_SUPPORT_VMX >= 2
/* VMCS 64-bit read only data fields */
/* binary 0010_01xx_xxxx_xxx0 */
case VMCS_64BIT_GUEST_PHYSICAL_ADDR:
field_64 = VMread64(encoding);
break;
case VMCS_64BIT_GUEST_PHYSICAL_ADDR_HI:
field_64 = VMread32(encoding);
break;
#endif
/* VMCS 64-bit guest state fields */
/* binary 0010_10xx_xxxx_xxx0 */
case VMCS_64BIT_GUEST_LINK_POINTER:
case VMCS_64BIT_GUEST_IA32_DEBUGCTL:
case VMCS_64BIT_GUEST_IA32_PAT:
case VMCS_64BIT_GUEST_IA32_EFER:
#if BX_SUPPORT_VMX >= 2
case VMCS_64BIT_GUEST_IA32_PDPTE0:
case VMCS_64BIT_GUEST_IA32_PDPTE1:
case VMCS_64BIT_GUEST_IA32_PDPTE2:
case VMCS_64BIT_GUEST_IA32_PDPTE3:
#endif
field_64 = VMread64(encoding);
break;
@ -2550,6 +2658,12 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::VMREAD(bxInstruction_c *i)
case VMCS_64BIT_GUEST_IA32_DEBUGCTL_HI:
case VMCS_64BIT_GUEST_IA32_PAT_HI:
case VMCS_64BIT_GUEST_IA32_EFER_HI:
#if BX_SUPPORT_VMX >= 2
case VMCS_64BIT_GUEST_IA32_PDPTE0_HI:
case VMCS_64BIT_GUEST_IA32_PDPTE1_HI:
case VMCS_64BIT_GUEST_IA32_PDPTE2_HI:
case VMCS_64BIT_GUEST_IA32_PDPTE3_HI:
#endif
field_64 = VMread32(encoding);
break;
@ -2735,6 +2849,11 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::VMWRITE(bxInstruction_c *i)
}
*/
switch(encoding) {
/* VMCS 16-bit control fields */
/* binary 0000_00xx_xxxx_xxx0 */
case VMCS_16BIT_CONTROL_VPID:
// fall through
/* VMCS 16-bit guest-state fields */
/* binary 0000_10xx_xxxx_xxx0 */
case VMCS_16BIT_GUEST_ES_SELECTOR:
@ -2827,6 +2946,9 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::VMWRITE(bxInstruction_c *i)
case VMCS_64BIT_CONTROL_TSC_OFFSET_HI:
case VMCS_64BIT_CONTROL_VIRTUAL_APIC_PAGE_ADDR_HI:
case VMCS_64BIT_CONTROL_APIC_ACCESS_ADDR_HI:
#if BX_SUPPORT_VMX >= 2
case VMCS_64BIT_CONTROL_EPTPTR_HI:
#endif
// fall through
/* VMCS 64-bit guest state fields */
@ -2835,6 +2957,12 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::VMWRITE(bxInstruction_c *i)
case VMCS_64BIT_GUEST_IA32_DEBUGCTL_HI:
case VMCS_64BIT_GUEST_IA32_PAT_HI:
case VMCS_64BIT_GUEST_IA32_EFER_HI:
#if BX_SUPPORT_VMX >= 2
case VMCS_64BIT_GUEST_IA32_PDPTE0_HI:
case VMCS_64BIT_GUEST_IA32_PDPTE1_HI:
case VMCS_64BIT_GUEST_IA32_PDPTE2_HI:
case VMCS_64BIT_GUEST_IA32_PDPTE3_HI:
#endif
// fall through
/* VMCS 64-bit host state fields */
@ -2856,6 +2984,9 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::VMWRITE(bxInstruction_c *i)
case VMCS_64BIT_CONTROL_TSC_OFFSET:
case VMCS_64BIT_CONTROL_VIRTUAL_APIC_PAGE_ADDR:
case VMCS_64BIT_CONTROL_APIC_ACCESS_ADDR:
#if BX_SUPPORT_VMX >= 2
case VMCS_64BIT_CONTROL_EPTPTR:
#endif
// fall through
/* VMCS 64-bit guest state fields */
@ -2864,6 +2995,12 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::VMWRITE(bxInstruction_c *i)
case VMCS_64BIT_GUEST_IA32_DEBUGCTL:
case VMCS_64BIT_GUEST_IA32_PAT:
case VMCS_64BIT_GUEST_IA32_EFER:
#if BX_SUPPORT_VMX >= 2
case VMCS_64BIT_GUEST_IA32_PDPTE0:
case VMCS_64BIT_GUEST_IA32_PDPTE1:
case VMCS_64BIT_GUEST_IA32_PDPTE2:
case VMCS_64BIT_GUEST_IA32_PDPTE3:
#endif
// fall through
/* VMCS 64-bit host state fields */
@ -2925,6 +3062,14 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::VMWRITE(bxInstruction_c *i)
VMwrite64(encoding, val_64);
break;
#if BX_SUPPORT_VMX >= 2
/* VMCS 64-bit read only data fields */
/* binary 0010_01xx_xxxx_xxx0 */
case VMCS_64BIT_GUEST_PHYSICAL_ADDR:
case VMCS_64BIT_GUEST_PHYSICAL_ADDR_HI:
// fall through
#endif
/* VMCS 32-bit read only data fields */
/* binary 0100_01xx_xxxx_xxx0 */
case VMCS_32BIT_INSTRUCTION_ERROR:
@ -3012,6 +3157,152 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::VMCLEAR(bxInstruction_c *i)
#endif
}
void BX_CPP_AttrRegparmN(1) BX_CPU_C::INVEPT(bxInstruction_c *i)
{
#if BX_SUPPORT_VMX >= 2
  // #UD unless the CPU is in VMX operation and not in virtual-8086
  // or compatibility mode
  if (! BX_CPU_THIS_PTR in_vmx || BX_CPU_THIS_PTR get_VM() || BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_COMPAT)
    exception(BX_UD_EXCEPTION, 0);

  // the 128-bit INVEPT descriptor must be a memory operand
  if (i->modC0()) {
    BX_INFO(("INVEPT: must be memory reference"));
    exception(BX_UD_EXCEPTION, 0);
  }

  // in VMX non-root operation the instruction unconditionally VMEXITs
  if (BX_CPU_THIS_PTR in_vmx_guest) {
    BX_ERROR(("VMEXIT: INVEPT in VMX non-root operation"));
    VMexit_Instruction(i, VMX_VMEXIT_INVEPT);
  }

  if (CPL != 0) {
    BX_ERROR(("INVEPT with CPL!=0 will cause #GP(0)"));
    exception(BX_GP_EXCEPTION, 0);
  }

  // the invalidation type comes from the register operand
  bx_address invalidation_type = i->os64L() ?
      BX_READ_64BIT_REG(i->nnn()) : BX_READ_32BIT_REG(i->nnn());

  // fetch the INVEPT descriptor (dqword) from memory
  bx_address eaddr = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));
  BxPackedXmmRegister descriptor;
  read_virtual_dqword(i->seg(), eaddr, (Bit8u *) &descriptor);

  switch(invalidation_type) {
  case BX_INVEPT_INVVPID_SINGLE_CONTEXT_INVALIDATION:
    // descriptor bits 63:0 hold the EPT pointer whose mappings to drop
    if (! is_eptptr_valid(descriptor.xmm64u(0))) {
      BX_ERROR(("INVEPT: invalid EPTPTR value !"));
      VMfail(VMXERR_INVALID_INVEPT_INVVPID);
      return;
    }
    TLB_flush();
    break;

  case BX_INVEPT_INVVPID_ALL_CONTEXT_INVALIDATION:
    TLB_flush();
    break;

  default:
    // individual-address and retaining-globals types are INVVPID-only
    BX_ERROR(("INVEPT: not supported type !"));
    VMfail(VMXERR_INVALID_INVEPT_INVVPID);
    return;
  }

  VMsucceed();
#else
  BX_INFO(("INVEPT: required VMXx2 support, use --enable-vmx=2 option"));
  exception(BX_UD_EXCEPTION, 0);
#endif
}
void BX_CPP_AttrRegparmN(1) BX_CPU_C::INVVPID(bxInstruction_c *i)
{
#if BX_SUPPORT_VMX >= 2
  // #UD unless the CPU is in VMX operation and not in virtual-8086
  // or compatibility mode
  if (! BX_CPU_THIS_PTR in_vmx || BX_CPU_THIS_PTR get_VM() || BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_COMPAT)
    exception(BX_UD_EXCEPTION, 0);

  // the 128-bit INVVPID descriptor must be a memory operand
  if (i->modC0()) {
    BX_INFO(("INVVPID: must be memory reference"));
    exception(BX_UD_EXCEPTION, 0);
  }

  // in VMX non-root operation the instruction unconditionally VMEXITs
  if (BX_CPU_THIS_PTR in_vmx_guest) {
    BX_ERROR(("VMEXIT: INVVPID in VMX non-root operation"));
    VMexit_Instruction(i, VMX_VMEXIT_INVVPID);
  }

  if (CPL != 0) {
    BX_ERROR(("INVVPID with CPL!=0 will cause #GP(0)"));
    exception(BX_GP_EXCEPTION, 0);
  }

  // the invalidation type comes from the register operand
  bx_address invalidation_type = i->os64L() ?
      BX_READ_64BIT_REG(i->nnn()) : BX_READ_32BIT_REG(i->nnn());

  // fetch the INVVPID descriptor (dqword) from memory
  bx_address eaddr = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));
  BxPackedXmmRegister descriptor;
  read_virtual_dqword(i->seg(), eaddr, (Bit8u *) &descriptor);

  // descriptor bits 63:16 are reserved and must be zero
  if (descriptor.xmm64u(0) > 0xffff) {
    BX_ERROR(("INVVPID: INVVPID_DESC reserved bits are set"));
    VMfail(VMXERR_INVALID_INVEPT_INVVPID);
    return;
  }

  // VPID 0 may only be used with all-context invalidation
  Bit16u vpid = descriptor.xmm16u(0);
  if (vpid == 0 && invalidation_type != BX_INVEPT_INVVPID_ALL_CONTEXT_INVALIDATION) {
    BX_ERROR(("INVVPID with VPID=0"));
    VMfail(VMXERR_INVALID_INVEPT_INVVPID);
    return;
  }

  switch(invalidation_type) {
  case BX_INVEPT_INVVPID_INDIVIDUAL_ADDRESS_INVALIDATION:
    // descriptor bits 127:64 hold the linear address to invalidate
    if (! IsCanonical(descriptor.xmm64u(1))) {
      BX_ERROR(("INVVPID: non canonical LADDR single context invalidation"));
      VMfail(VMXERR_INVALID_INVEPT_INVVPID);
      return;
    }
    TLB_flush(); // invalidate all mappings for address LADDR tagged with VPID
    break;

  case BX_INVEPT_INVVPID_SINGLE_CONTEXT_INVALIDATION:
    TLB_flush(); // invalidate all mappings tagged with VPID
    break;

  case BX_INVEPT_INVVPID_ALL_CONTEXT_INVALIDATION:
    TLB_flush(); // invalidate all mappings tagged with VPID <> 0
    break;

  case BX_INVEPT_INVVPID_SINGLE_CONTEXT_NON_GLOBAL_INVALIDATION:
    TLB_flush(); // invalidate all mappings tagged with VPID except globals
    break;

  default:
    BX_ERROR(("INVVPID: not supported type !"));
    VMfail(VMXERR_INVALID_INVEPT_INVVPID);
    return;
  }

  VMsucceed();
#else
  BX_INFO(("INVVPID: required VMXx2 support, use --enable-vmx=2 option"));
  exception(BX_UD_EXCEPTION, 0);
#endif
}
#if BX_SUPPORT_VMX
void BX_CPU_C::register_vmx_state(bx_param_c *parent)
{
@ -3032,7 +3323,7 @@ void BX_CPU_C::register_vmx_state(bx_param_c *parent)
// VM-Execution Control Fields
//
bx_list_c *vmexec_ctrls = new bx_list_c(vmcache, "VMEXEC_CTRLS", 23);
bx_list_c *vmexec_ctrls = new bx_list_c(vmcache, "VMEXEC_CTRLS", 25);
BXRS_HEX_PARAM_FIELD(vmexec_ctrls, vmexec_ctrls1, BX_CPU_THIS_PTR vmcs.vmexec_ctrls1);
BXRS_HEX_PARAM_FIELD(vmexec_ctrls, vmexec_ctrls2, BX_CPU_THIS_PTR vmcs.vmexec_ctrls2);
@ -3057,6 +3348,8 @@ void BX_CPU_C::register_vmx_state(bx_param_c *parent)
BXRS_HEX_PARAM_FIELD(vmexec_ctrls, vm_tpr_threshold, BX_CPU_THIS_PTR vmcs.vm_tpr_threshold);
#if BX_SUPPORT_VMX >= 2
BXRS_HEX_PARAM_FIELD(vmexec_ctrls, apic_access_page, BX_CPU_THIS_PTR vmcs.apic_access_page);
BXRS_HEX_PARAM_FIELD(vmexec_ctrls, eptptr, BX_CPU_THIS_PTR vmcs.eptptr);
BXRS_HEX_PARAM_FIELD(vmexec_ctrls, vpid, BX_CPU_THIS_PTR vmcs.vpid);
#endif
BXRS_HEX_PARAM_FIELD(vmexec_ctrls, executive_vmcsptr, BX_CPU_THIS_PTR vmcs.executive_vmcsptr);
@ -3089,7 +3382,7 @@ void BX_CPU_C::register_vmx_state(bx_param_c *parent)
// VM-Exit Information Fields
//
/*
bx_list_c *vmexit_info = new bx_list_c(vmcache, "VMEXIT_INFO", 14);
bx_list_c *vmexit_info = new bx_list_c(vmcache, "VMEXIT_INFO", 15);
BXRS_HEX_PARAM_FIELD(vmexit_info, vmexit_reason, BX_CPU_THIS_PTR vmcs.vmexit_reason);
BXRS_HEX_PARAM_FIELD(vmexit_info, vmexit_qualification, BX_CPU_THIS_PTR vmcs.vmexit_qualification);
@ -3100,6 +3393,9 @@ void BX_CPU_C::register_vmx_state(bx_param_c *parent)
BXRS_HEX_PARAM_FIELD(vmexit_info, vmexit_instr_info, BX_CPU_THIS_PTR vmcs.vmexit_instr_info);
BXRS_HEX_PARAM_FIELD(vmexit_info, vmexit_instr_length, BX_CPU_THIS_PTR vmcs.vmexit_instr_length);
BXRS_HEX_PARAM_FIELD(vmexit_info, vmexit_guest_laddr, BX_CPU_THIS_PTR vmcs.vmexit_guest_laddr);
#if BX_SUPPORT_VMX >= 2
BXRS_HEX_PARAM_FIELD(vmexit_info, vmexit_guest_paddr, BX_CPU_THIS_PTR vmcs.vmexit_guest_paddr);
#endif
BXRS_HEX_PARAM_FIELD(vmexit_info, vmexit_io_rcx, BX_CPU_THIS_PTR vmcs.vmexit_io_rcx);
BXRS_HEX_PARAM_FIELD(vmexit_info, vmexit_io_rsi, BX_CPU_THIS_PTR vmcs.vmexit_io_rsi);
BXRS_HEX_PARAM_FIELD(vmexit_info, vmexit_io_rdi, BX_CPU_THIS_PTR vmcs.vmexit_io_rdi);

View File

@ -1,5 +1,5 @@
/////////////////////////////////////////////////////////////////////////
// $Id: vmx.h,v 1.26 2010-04-04 19:23:47 sshwarts Exp $
// $Id: vmx.h,v 1.27 2010-04-07 17:12:17 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2009 Stanislav Shwartsman
@ -164,6 +164,10 @@ enum VMX_vmabort_code {
// VMCS fields
// =============
/* VMCS 16-bit control fields */
/* binary 0000_00xx_xxxx_xxx0 */
#define VMCS_16BIT_CONTROL_VPID 0x00000000
/* VMCS 16-bit guest-state fields */
/* binary 0000_10xx_xxxx_xxx0 */
#define VMCS_16BIT_GUEST_ES_SELECTOR 0x00000800
@ -454,6 +458,7 @@ typedef struct bx_VMCS_GUEST_STATE
Bit64u efer_msr;
#endif
Bit64u pat_msr;
Bit64u pdptr[4];
#endif
} VMCS_GUEST_STATE;
@ -586,8 +591,10 @@ typedef struct bx_VMCS
#define VMX_VM_EXEC_CTRL3_SUPPORTED_BITS \
(VMX_VM_EXEC_CTRL3_VIRTUALIZE_APIC_ACCESSES | \
VMX_VM_EXEC_CTRL3_EPT_ENABLE | \
VMX_VM_EXEC_CTRL3_DESCRIPTOR_TABLE_VMEXIT | \
VMX_VM_EXEC_CTRL3_RDTSCP | \
VMX_VM_EXEC_CTRL3_VPID_ENABLE | \
VMX_VM_EXEC_CTRL3_WBINVD_VMEXIT)
#endif
@ -615,6 +622,8 @@ typedef struct bx_VMCS
Bit32u vm_tpr_threshold;
#if BX_SUPPORT_VMX >= 2
bx_phy_address apic_access_page;
Bit64u eptptr;
Bit16u vpid;
#endif
Bit64u executive_vmcsptr;
@ -645,8 +654,8 @@ typedef struct bx_VMCS
VMX_VMEXIT_CTRL1_INTA_ON_VMEXIT | \
((BX_SUPPORT_VMX >= 2) ? VMX_VMEXIT_CTRL1_STORE_PAT_MSR : 0) | \
((BX_SUPPORT_VMX >= 2) ? VMX_VMEXIT_CTRL1_LOAD_PAT_MSR : 0) | \
((BX_SUPPORT_VMX >= 2 && BX_SUPPORT_X86_64) ? VMX_VMEXIT_CTRL1_STORE_EFER_MSR : 0) | \
((BX_SUPPORT_VMX >= 2 && BX_SUPPORT_X86_64) ? VMX_VMEXIT_CTRL1_LOAD_EFER_MSR : 0))
((BX_SUPPORT_VMX >= 2) ? VMX_VMEXIT_CTRL1_STORE_EFER_MSR : 0) | \
((BX_SUPPORT_VMX >= 2) ? VMX_VMEXIT_CTRL1_LOAD_EFER_MSR : 0))
#endif
@ -681,7 +690,7 @@ typedef struct bx_VMCS
VMX_VMENTRY_CTRL1_SMM_ENTER | \
VMX_VMENTRY_CTRL1_DEACTIVATE_DUAL_MONITOR_TREATMENT | \
((BX_SUPPORT_VMX >= 2) ? VMX_VMENTRY_CTRL1_LOAD_PAT_MSR : 0) | \
((BX_SUPPORT_VMX >= 2 && BX_SUPPORT_X86_64) ? VMX_VMENTRY_CTRL1_LOAD_EFER_MSR : 0))
((BX_SUPPORT_VMX >= 2) ? VMX_VMENTRY_CTRL1_LOAD_EFER_MSR : 0))
#endif
@ -703,6 +712,9 @@ typedef struct bx_VMCS
Bit32u vmexit_instr_info;
Bit32u vmexit_instr_length;
bx_address vmexit_guest_laddr;
#if BX_SUPPORT_VMX >= 2
bx_phy_address vmexit_guest_paddr;
#endif
Bit32u vmexit_excep_info;
Bit32u vmexit_excep_error_code;
@ -964,6 +976,8 @@ enum VMX_Activity_State {
((((Bit64u) VMX_MSR_VMCS_ENUM_HI) << 32) | VMX_MSR_VMCS_ENUM_LO)
#if BX_SUPPORT_VMX >= 2
// IA32_VMX_MSR_PROCBASED_CTRLS2 MSR (0x48b)
// -----------------------------
@ -976,4 +990,40 @@ enum VMX_Activity_State {
#define VMX_MSR_VMX_PROCBASED_CTRLS2 \
((((Bit64u) VMX_MSR_VMX_PROCBASED_CTRLS2_HI) << 32) | VMX_MSR_VMX_PROCBASED_CTRLS2_LO)
// IA32_VMX_EPT_VPID_CAP MSR (0x48c)
// ---------------------
// Invalidation types shared by the INVEPT and INVVPID instructions,
// passed in the instruction's register operand. The individual-address
// and single-context-retaining-globals types are accepted by INVVPID
// only; the INVEPT handler rejects them as unsupported.
enum VMX_INVEPT_INVVPID_type {
BX_INVEPT_INVVPID_INDIVIDUAL_ADDRESS_INVALIDATION = 0,
BX_INVEPT_INVVPID_SINGLE_CONTEXT_INVALIDATION,
BX_INVEPT_INVVPID_ALL_CONTEXT_INVALIDATION,
BX_INVEPT_INVVPID_SINGLE_CONTEXT_NON_GLOBAL_INVALIDATION
};
// [0] - BX_EPT_ENTRY_EXECUTE_ONLY support
// [6] - 4-levels page walk length
// [8] - allow UC EPT paging structure memory type
// [14] - allow WB EPT paging structure memory type
// [16] - EPT 2M pages support
// [17] - EPT 1G pages support
// [20] - INVEPT instruction supported
// [25] - INVEPT single-context invalidation supported
// [26] - INVEPT all-context invalidation supported
#define VMX_MSR_VMX_EPT_VPID_CAP_LO (0x06114141 | (BX_SUPPORT_1G_PAGES << 17))
// [32] - INVVPID instruction supported
// [40] - individual-address INVVPID is supported
// [41] - single-context INVVPID is supported
// [42] - all-context INVVPID is supported
// [43] - single-context-retaining-globals INVVPID is supported
#define VMX_MSR_VMX_EPT_VPID_CAP_HI (0x00000f01)
#define VMX_MSR_VMX_EPT_VPID_CAP \
((((Bit64u) VMX_MSR_VMX_EPT_VPID_CAP_HI) << 32) | VMX_MSR_VMX_EPT_VPID_CAP_LO)
#endif
#endif // _BX_VMX_INTEL_H_