Implemented EPT A/D extensions support.

Bochs is now fully aligned with the latest published revision of
the Intel Architecture Manual (revision 043).
This commit is contained in:
Stanislav Shwartsman 2012-05-02 18:11:39 +00:00
parent af74c6df44
commit 39c14ef0d1
9 changed files with 88 additions and 33 deletions

View File

@ -1,6 +1,9 @@
Changes after 2.5.1 release:
- CPU
- Implemented EPT A/D extensions support.
Bochs is now fully aligned with the latest published revision of
the Intel Architecture Manual (revision 043).
- Another 10% CPU emulation speedup with even more optimal lazy flags
handling and stack access optimizations
- Support for AMD's SVM hardware emulation in Bochs CPU, to enable
@ -16,7 +19,7 @@ Changes after 2.5.1 release:
- Implemented new debugger command 'info device [string]' that shows the
state of the device specified in 'string'
- Improved debug dump for ne2k, pci, pic and vga/cirrus devices. Added
debug dump for pci2isa, i/o apic, floppy and dma controller devices.
debug dump for pci2isa, i/o apic, pit, floppy and dma controller devices.
- Added TLB to CPU param tree - now it can be browsed from Bochs internal
debugger and Bochs debugger GUI through param tree interfaces
- Implemented 'writemem' debugger command to dump virtual memory block

View File

@ -3979,18 +3979,20 @@ public: // for now...
// linear address for translate_linear expected to be canonical !
BX_SMF bx_phy_address translate_linear(bx_address laddr, unsigned user, unsigned rw);
BX_SMF bx_phy_address translate_linear_legacy(bx_address laddr, Bit32u &lpf_mask, Bit32u &combined_access, unsigned user, unsigned rw);
BX_SMF void update_access_dirty(bx_phy_address *entry_addr, Bit32u *entry, unsigned leaf, bx_bool write);
BX_SMF void update_access_dirty(bx_phy_address *entry_addr, Bit32u *entry, unsigned leaf, unsigned write);
#if BX_CPU_LEVEL >= 6
BX_SMF bx_phy_address translate_linear_load_PDPTR(bx_address laddr, unsigned user, unsigned rw);
BX_SMF bx_phy_address translate_linear_PAE(bx_address laddr, Bit32u &lpf_mask, Bit32u &combined_access, unsigned user, unsigned rw);
BX_SMF int check_entry_PAE(const char *s, Bit64u entry, Bit64u reserved, unsigned rw, bx_bool *nx_fault);
BX_SMF void update_access_dirty_PAE(bx_phy_address *entry_addr, Bit64u *entry, unsigned max_level, unsigned leaf, bx_bool write);
BX_SMF void update_access_dirty_PAE(bx_phy_address *entry_addr, Bit64u *entry, unsigned max_level, unsigned leaf, unsigned write);
#endif
#if BX_SUPPORT_X86_64
BX_SMF bx_phy_address translate_linear_long_mode(bx_address laddr, Bit32u &lpf_mask, Bit32u &combined_access, unsigned user, unsigned rw);
#endif
#if BX_SUPPORT_VMX >= 2
BX_SMF bx_phy_address translate_guest_physical(bx_phy_address guest_paddr, bx_address guest_laddr, bx_bool guest_laddr_valid, bx_bool is_page_walk, unsigned rw);
BX_SMF void update_ept_access_dirty(bx_phy_address *entry_addr, Bit64u *entry, unsigned leaf, unsigned write);
BX_SMF bx_bool is_eptptr_valid(Bit64u eptptr);
#endif
#if BX_SUPPORT_SVM
BX_SMF void nested_page_fault(unsigned fault, bx_phy_address guest_paddr, unsigned rw, unsigned is_page_walk);

View File

@ -157,6 +157,7 @@ typedef bx_cpuid_t* (*bx_create_cpuid_method)(BX_CPU_C *cpu);
#define BX_VMX_DESCRIPTOR_TABLE_EXIT (1 << 14) /* Descriptor Table VMEXIT */
#define BX_VMX_PAUSE_LOOP_EXITING (1 << 15) /* Pause Loop Exiting */
#define BX_VMX_EPTP_SWITCHING (1 << 16) /* EPTP switching (VM Function 0) */
#define BX_VMX_EPT_ACCESS_DIRTY (1 << 17) /* Extended Page Tables (EPT) A/D Bits */
// CPUID defines - STD features CPUID[0x00000001].EDX
// ----------------------------

View File

@ -187,7 +187,7 @@ bx_bool BX_CPP_AttrRegparmN(2) BX_CPU_C::rdmsr(Bit32u index, Bit64u *msr)
val64 = VMX_MSR_VMX_TRUE_VMENTRY_CTRLS;
break;
case BX_MSR_VMX_EPT_VPID_CAP:
if (BX_SUPPORT_VMX_EXTENSION(BX_VMX_EPT) || BX_SUPPORT_VMX_EXTENSION(BX_VMX_VPID)) {
if (VMX_MSR_VMX_EPT_VPID_CAP != 0) {
val64 = VMX_MSR_VMX_EPT_VPID_CAP;
break;
}

View File

@ -761,7 +761,7 @@ bx_phy_address BX_CPU_C::translate_linear_long_mode(bx_address laddr, Bit32u &lp
#endif
void BX_CPU_C::update_access_dirty_PAE(bx_phy_address *entry_addr, Bit64u *entry, unsigned max_level, unsigned leaf, bx_bool write)
void BX_CPU_C::update_access_dirty_PAE(bx_phy_address *entry_addr, Bit64u *entry, unsigned max_level, unsigned leaf, unsigned write)
{
// Update A bit if needed
for (unsigned level=max_level; level > leaf; level--) {
@ -1073,7 +1073,7 @@ bx_phy_address BX_CPU_C::translate_linear_legacy(bx_address laddr, Bit32u &lpf_m
return ppf | (laddr & lpf_mask);
}
void BX_CPU_C::update_access_dirty(bx_phy_address *entry_addr, Bit32u *entry, unsigned leaf, bx_bool write)
void BX_CPU_C::update_access_dirty(bx_phy_address *entry_addr, Bit32u *entry, unsigned leaf, unsigned write)
{
if (leaf == BX_LEVEL_PTE) {
// Update PDE A bit if needed
@ -1492,6 +1492,10 @@ bx_phy_address BX_CPU_C::translate_guest_physical(bx_phy_address guest_paddr, bx
BX_DEBUG(("EPT walk for guest paddr 0x" FMT_ADDRX, guest_paddr));
// when EPT A/D enabled treat guest page table accesses as writes
if (BX_SUPPORT_VMX_EXTENSION(BX_VMX_EPT_ACCESS_DIRTY) && is_page_walk && guest_laddr_valid)
rw = BX_WRITE;
if (rw == BX_EXECUTE) access_mask |= BX_EPT_EXECUTE;
if (rw & 1) access_mask |= BX_EPT_WRITE; // write or r-m-w
if (rw == BX_READ) access_mask |= BX_EPT_READ;
@ -1582,10 +1586,36 @@ bx_phy_address BX_CPU_C::translate_guest_physical(bx_phy_address guest_paddr, bx
}
}
if (BX_SUPPORT_VMX_EXTENSION(BX_VMX_EPT_ACCESS_DIRTY)) {
update_ept_access_dirty(entry_addr, entry, leaf, rw & 1);
}
Bit32u page_offset = PAGE_OFFSET(guest_paddr);
return ppf | page_offset;
}
// Access bit 8, Dirty bit 9
//
// Sets the EPT Accessed flag (bit 8) on every paging-structure entry walked,
// and additionally the Dirty flag (bit 9) on the leaf entry for write
// accesses. Entries are written back to physical memory only when a bit
// actually changes, avoiding redundant physical-memory writes.
//
// entry_addr - per-level physical addresses of the EPT entries, indexed
//              from the leaf level up to BX_LEVEL_PML4
// entry      - per-level cached EPT entry values (updated in place)
// leaf       - level of the entry that mapped the page (walk terminator)
// write      - nonzero for a write or read-modify-write access; controls
//              whether the Dirty bit is set on the leaf
void BX_CPU_C::update_ept_access_dirty(bx_phy_address *entry_addr, Bit64u *entry, unsigned leaf, unsigned write)
{
// Update A bit if needed
for (unsigned level=BX_LEVEL_PML4; level > leaf; level--) {
if (!(entry[level] & 0x100)) {
entry[level] |= 0x100; // set Accessed bit on this non-leaf entry
access_write_physical(entry_addr[level], 8, &entry[level]);
BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID, entry_addr[level], 8, BX_WRITE,
(BX_EPT_PTE_ACCESS + level), (Bit8u*)(&entry[level]));
}
}
// Update A/D bits if needed
// Write back the leaf only if A is clear, or if this is a write and D is clear.
if (!(entry[leaf] & 0x100) || (write && !(entry[leaf] & 0x200))) {
entry[leaf] |= (0x100 | (write<<9)); // Update A and possibly D bits
access_write_physical(entry_addr[leaf], 8, &entry[leaf]);
BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID, entry_addr[leaf], 8, BX_WRITE,
(BX_EPT_PTE_ACCESS + leaf), (Bit8u*)(&entry[leaf]));
}
}
#endif
#if BX_DEBUGGER || BX_DISASM || BX_INSTRUMENTATION || BX_GDBSTUB

View File

@ -592,12 +592,41 @@ void BX_CPU_C::init_vmx_capabilities(void)
cap->vmx_vmentry_ctrl_supported_bits |= VMX_VMENTRY_CTRL1_LOAD_EFER_MSR;
#endif
#if BX_SUPPORT_VMX >= 2
// EPT/VPID capabilities
// -----------------------------------------------------------
// [0] - BX_EPT_ENTRY_EXECUTE_ONLY support
// [6] - 4-levels EPT page walk length
// [8] - allow UC EPT paging structure memory type
// [14] - allow WB EPT paging structure memory type
// [16] - EPT 2M pages support
// [17] - EPT 1G pages support
// [20] - INVEPT instruction supported
// [21] - EPT A/D bits supported
// [25] - INVEPT single-context invalidation supported
// [26] - INVEPT all-context invalidation supported
// [32] - INVVPID instruction supported
// [40] - individual-address INVVPID is supported
// [41] - single-context INVVPID is supported
// [42] - all-context INVVPID is supported
// [43] - single-context-retaining-globals INVVPID is supported
if (BX_SUPPORT_VMX_EXTENSION(BX_VMX_EPT)) {
cap->vmx_ept_vpid_cap_supported_bits = BX_CONST64(0x06114141);
if (bx_cpuid_support_1g_paging())
cap->vmx_ept_vpid_cap_supported_bits |= (1 << 17);
if (BX_SUPPORT_VMX_EXTENSION(BX_VMX_EPT_ACCESS_DIRTY))
cap->vmx_ept_vpid_cap_supported_bits |= (1 << 21);
}
if (BX_SUPPORT_VMX_EXTENSION(BX_VMX_VPID))
cap->vmx_ept_vpid_cap_supported_bits |= BX_CONST64(0x00000f01) << 32;
// vm functions
// -----------------------------------------------------------
// [00] EPTP switching
// [63-01] reserved
#if BX_SUPPORT_VMX >= 2
cap->vmx_vmfunc_supported_bits = 0;
if (BX_SUPPORT_VMX_EXTENSION(BX_VMX_EPTP_SWITCHING))
@ -606,6 +635,7 @@ void BX_CPU_C::init_vmx_capabilities(void)
// enable vm functions secondary vmexec control if needed
if (cap->vmx_vmfunc_supported_bits != 0)
cap->vmx_vmexec_ctrl2_supported_bits |= VMX_VM_EXEC_CTRL3_VMFUNC_ENABLE;
#endif
}

View File

@ -59,8 +59,6 @@ BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::VMFUNC(bxInstruction_c *i)
}
#if BX_SUPPORT_VMX >= 2
extern bx_bool is_eptptr_valid(Bit64u eptptr);
void BX_CPP_AttrRegparmN(1) BX_CPU_C::vmfunc_eptp_switching(bxInstruction_c *i)
{
Bit32u eptp_list_entry = ECX;

View File

@ -259,7 +259,7 @@ unsigned BX_CPU_C::VMXReadRevisionID(bx_phy_address pAddr)
}
#if BX_SUPPORT_VMX >= 2
bx_bool is_eptptr_valid(Bit64u eptptr)
bx_bool BX_CPU_C::is_eptptr_valid(Bit64u eptptr)
{
// [2:0] EPT paging-structure memory type
// 0 = Uncacheable (UC)
@ -271,8 +271,18 @@ bx_bool is_eptptr_valid(Bit64u eptptr)
Bit32u walk_length = (eptptr >> 3) & 7;
if (walk_length != 3) return 0;
#define BX_EPTPTR_RESERVED_BITS 0xfc0 /* bits 11:6 are reserved */
if (eptptr & BX_EPTPTR_RESERVED_BITS) return 0;
if (! BX_SUPPORT_VMX_EXTENSION(BX_VMX_EPT_ACCESS_DIRTY)) {
if (eptptr & 0x40) {
BX_ERROR(("is_eptptr_valid: EPTPTR A/D enabled when not supported by CPU"));
return 0;
}
}
#define BX_EPTPTR_RESERVED_BITS 0xf80 /* bits 11:5 are reserved */
if (eptptr & BX_EPTPTR_RESERVED_BITS) {
BX_ERROR(("is_eptptr_valid: EPTPTR reserved bits set"));
return 0;
}
if (! IsValidPhyAddr(eptptr)) return 0;
return 1;

View File

@ -526,6 +526,7 @@ typedef struct bx_VMX_Cap
Bit32u vmx_vmexit_ctrl_supported_bits;
Bit32u vmx_vmentry_ctrl_supported_bits;
#if BX_SUPPORT_VMX >= 2
Bit64u vmx_ept_vpid_cap_supported_bits;
Bit64u vmx_vmfunc_supported_bits;
#endif
} VMX_CAP;
@ -982,28 +983,8 @@ enum VMX_INVEPT_INVVPID_type {
BX_INVEPT_INVVPID_SINGLE_CONTEXT_NON_GLOBAL_INVALIDATION
};
// [0] - BX_EPT_ENTRY_EXECUTE_ONLY support
// [6] - 4-levels page walk length
// [8] - allow UC EPT paging structure memory type
// [14] - allow WB EPT paging structure memory type
// [16] - EPT 2M pages support
// [17] - EPT 1G pages support
// [20] - INVEPT instruction supported
// [25] - INVEPT single-context invalidation supported
// [26] - INVEPT all-context invalidation supported
#define VMX_MSR_VMX_EPT_VPID_CAP_LO (0x06114141 | (!!(bx_cpuid_support_1g_paging()) << 17))
// [32] - INVVPID instruction supported
// [40] - individual-address INVVPID is supported
// [41] - single-context INVVPID is supported
// [42] - all-context INVVPID is supported
// [43] - single-context-retaining-globals INVVPID is supported
#define VMX_MSR_VMX_EPT_VPID_CAP_HI (0x00000f01)
#define VMX_MSR_VMX_EPT_VPID_CAP \
((((Bit64u) VMX_MSR_VMX_EPT_VPID_CAP_HI) << 32) | VMX_MSR_VMX_EPT_VPID_CAP_LO)
(BX_CPU_THIS_PTR vmx_cap.vmx_ept_vpid_cap_supported_bits)
#endif