re-commit changes from SVN rev11026 which were accidentally undone by Volker's last commit

Stanislav Shwartsman 2012-02-12 19:13:57 +00:00
parent de94b08a1a
commit 0b5f798af1
5 changed files with 28 additions and 79 deletions


@@ -1149,7 +1149,6 @@ public: // for now...
#if BX_CPU_LEVEL >= 6
struct {
bx_bool valid;
Bit64u entry[4];
} PDPTR_CACHE;
#endif
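This hunk is the core of the change: the cached PDPTEs lose their separate validity flag. Assuming the single line dropped here is the bx_bool valid; member (consistent with the remaining hunks, which remove every use of PDPTR_CACHE.valid), the structure reduces to the four cached entries, which CheckPDPTR() simply reloads whenever it is called:

#if BX_CPU_LEVEL >= 6
  struct {
    Bit64u entry[4];   // PDPTEs cached by CheckPDPTR(); reloaded on every call
  } PDPTR_CACHE;
#endif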


@@ -1049,9 +1049,6 @@ void BX_CPU_C::reset(unsigned source)
BX_CPU_THIS_PTR errorno = 0;
TLB_flush();
#if BX_CPU_LEVEL >= 6
BX_CPU_THIS_PTR PDPTR_CACHE.valid = 0;
#endif
// invalidate the prefetch queue
BX_CPU_THIS_PTR eipPageBias = 0;


@@ -687,7 +687,7 @@ bx_phy_address BX_CPU_C::translate_linear_long_mode(bx_address laddr, Bit32u &lp
bx_bool nx_fault = 0;
int leaf;
lpf_mask = 0xfff;
Bit64u offset_mask = BX_CONST64(0x0000ffffffffffff);
combined_access = 0x06;
for (leaf = BX_LEVEL_PML4;; --leaf) {
@@ -700,6 +700,7 @@ bx_phy_address BX_CPU_C::translate_linear_long_mode(bx_address laddr, Bit32u &lp
#endif
access_read_physical(entry_addr[leaf], 8, &entry[leaf]);
BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID, entry_addr[leaf], 8, BX_READ, (BX_PTE_ACCESS + leaf), (Bit8u*)(&entry[leaf]));
offset_mask >>= 9;
Bit64u curr_entry = entry[leaf];
int fault = check_entry_PAE(bx_paging_level[leaf], curr_entry, rw, &nx_fault);
@@ -717,29 +718,16 @@ bx_phy_address BX_CPU_C::translate_linear_long_mode(bx_address laddr, Bit32u &lp
page_fault(ERROR_RESERVED | ERROR_PROTECTION, laddr, user, rw);
}
if (leaf == BX_LEVEL_PDPTE) {
if (curr_entry & PAGING_PAE_PDPTE1G_RESERVED_BITS) {
BX_DEBUG(("PAE PDPE1G: reserved bit is set: PDPE=%08x:%08x", GET32H(curr_entry), GET32L(curr_entry)));
page_fault(ERROR_RESERVED | ERROR_PROTECTION, laddr, user, rw);
}
// Make up the physical page frame address.
ppf = (bx_phy_address)((curr_entry & BX_CONST64(0x000fffffc0000000)) | (laddr & 0x3ffff000));
lpf_mask = 0x3fffffff;
break;
ppf &= BX_CONST64(0x000fffffffffe000);
if (ppf & offset_mask) {
BX_DEBUG(("PAE %s: reserved bit is set: 0x" FMT_ADDRX64, bx_paging_level[leaf], curr_entry));
page_fault(ERROR_RESERVED | ERROR_PROTECTION, laddr, user, rw);
}
if (leaf == BX_LEVEL_PDE) {
if (curr_entry & PAGING_PAE_PDE2M_RESERVED_BITS) {
BX_DEBUG(("PAE PDE2M: reserved bit is set PDE=%08x:%08x", GET32H(curr_entry), GET32L(curr_entry)));
page_fault(ERROR_RESERVED | ERROR_PROTECTION, laddr, user, rw);
}
// Make up the physical page frame address.
ppf = (bx_phy_address)((curr_entry & BX_CONST64(0x000fffffffe00000)) | (laddr & 0x001ff000));
lpf_mask = 0x1fffff;
break;
}
// Make up the physical page frame address
ppf += (bx_phy_address)(laddr & LPFOf(offset_mask));
lpf_mask = offset_mask;
break;
}
}
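The interesting part of this hunk is the offset_mask that replaces the per-size reserved-bit macros and frame arithmetic: it starts at the full 48-bit page offset and is shifted right by 9 bits after every table read, so when a PS bit is found it equals the large-page offset (0x3fffffff for 1G, 0x1fffff for 2M) and is used twice, once to catch reserved bits in the frame address and once to fold the remaining linear-address bits into the physical address. A minimal standalone sketch of that arithmetic, with hypothetical names rather than the Bochs identifiers:

#include <stdio.h>
#include <stdint.h>

/* Sketch of the offset_mask arithmetic for a large-page leaf entry.
 * leaf: 2 = PDPTE (1G page), 1 = PDE (2M page).  Returns the translated
 * physical address, or ~0 if reserved frame bits are set. */
static uint64_t large_page_translate(uint64_t entry, uint64_t laddr, int leaf)
{
  uint64_t offset_mask = 0x0000ffffffffffffULL;   /* 48-bit page offset */
  for (int level = 3; level >= leaf; level--)
    offset_mask >>= 9;                            /* one shift per table read */

  uint64_t ppf = entry & 0x000fffffffffe000ULL;   /* frame bits 51:13 (bit 12 is PAT) */
  if (ppf & offset_mask)                          /* e.g. bits 20:13 must be 0 for 2M */
    return ~0ULL;                                 /* reserved bit set -> page fault */

  return ppf + (laddr & offset_mask & ~0xfffULL)  /* middle linear-address bits */
             + (laddr & 0xfff);                   /* byte offset within the page */
}

int main(void)
{
  /* 2M page (PS set) with frame 0x40200000 mapping linear 0x7f0012345678 */
  uint64_t pde = 0x40200000ULL | 0x83;
  printf("0x%llx\n", (unsigned long long)
         large_page_translate(pde, 0x7f0012345678ULL, 1));   /* prints 0x40345678 */
  return 0;
}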
@@ -800,8 +788,6 @@ bx_phy_address BX_CPU_C::translate_linear_long_mode(bx_address laddr, Bit32u &lp
bx_bool BX_CPP_AttrRegparmN(1) BX_CPU_C::CheckPDPTR(bx_phy_address cr3_val)
{
BX_CPU_THIS_PTR PDPTR_CACHE.valid = 0;
// with Nested Paging PDPTRs are not loaded for guest page tables but
// accessed on demand as part of the guest page walk
#if BX_SUPPORT_SVM
@@ -836,8 +822,6 @@ bx_bool BX_CPP_AttrRegparmN(1) BX_CPU_C::CheckPDPTR(bx_phy_address cr3_val)
for (n=0; n<4; n++)
BX_CPU_THIS_PTR PDPTR_CACHE.entry[n] = pdptr[n];
BX_CPU_THIS_PTR PDPTR_CACHE.valid = 1;
return 1; /* PDPTRs are fine */
}
@@ -874,13 +858,6 @@ bx_phy_address BX_CPU_C::translate_linear_PAE(bx_address laddr, Bit32u &lpf_mask
else
#endif
{
if (! BX_CPU_THIS_PTR PDPTR_CACHE.valid) {
BX_PANIC(("PDPTR_CACHE not valid !"));
if (! CheckPDPTR(BX_CPU_THIS_PTR cr3)) {
BX_ERROR(("translate_linear_PAE(): PDPTR check failed !"));
exception(BX_GP_EXCEPTION, 0);
}
}
pdpte = BX_CPU_THIS_PTR PDPTR_CACHE.entry[(laddr >> 30) & 3];
}
@@ -915,11 +892,11 @@ bx_phy_address BX_CPU_C::translate_linear_PAE(bx_address laddr, Bit32u &lpf_mask
// Ignore CR4.PSE in PAE mode
if (curr_entry & 0x80) {
if (curr_entry & PAGING_PAE_PDE2M_RESERVED_BITS) {
BX_DEBUG(("PAE PDE2M: reserved bit is set PDE=%08x:%08x", GET32H(curr_entry), GET32L(curr_entry)));
BX_DEBUG(("PAE PDE2M: reserved bit is set PDE=0x" FMT_ADDRX64, curr_entry));
page_fault(ERROR_RESERVED | ERROR_PROTECTION, laddr, user, rw);
}
// Make up the physical page frame address.
// Make up the physical page frame address
ppf = (bx_phy_address)((curr_entry & BX_CONST64(0x000fffffffe00000)) | (laddr & 0x001ff000));
lpf_mask = 0x1fffff;
break;
@@ -1234,10 +1211,12 @@ bx_phy_address BX_CPU_C::translate_linear(bx_address laddr, unsigned user, unsig
bx_phy_address BX_CPU_C::translate_guest_physical(bx_phy_address guest_paddr, bx_address guest_laddr, bx_bool guest_laddr_valid, bx_bool is_page_walk, unsigned rw)
{
VMCS_CACHE *vm = &BX_CPU_THIS_PTR vmcs;
bx_phy_address entry_addr[4], ppf = 0, pbase = LPFOf(vm->eptptr);
bx_phy_address entry_addr[4], ppf = LPFOf(vm->eptptr);
Bit64u entry[4];
int leaf;
Bit32u combined_access = 0x7, access_mask = 0;
Bit64u offset_mask = BX_CONST64(0x0000ffffffffffff);
BX_DEBUG(("EPT walk for guest paddr 0x" FMT_ADDRX, guest_paddr));
@@ -1248,11 +1227,12 @@ bx_phy_address BX_CPU_C::translate_guest_physical(bx_phy_address guest_paddr, bx
Bit32u vmexit_reason = 0, vmexit_qualification = access_mask;
for (leaf = BX_LEVEL_PML4;; --leaf) {
entry_addr[leaf] = pbase + ((guest_paddr >> (9 + 9*leaf)) & 0xff8);
entry_addr[leaf] = ppf + ((guest_paddr >> (9 + 9*leaf)) & 0xff8);
access_read_physical(entry_addr[leaf], 8, &entry[leaf]);
BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID, entry_addr[leaf], 8, BX_READ,
(BX_EPT_PTE_ACCESS + leaf), (Bit8u*)(&entry[leaf]));
offset_mask >>= 9;
Bit64u curr_entry = entry[leaf];
Bit32u curr_access_mask = curr_entry & 0x7;
@@ -1279,19 +1259,14 @@ bx_phy_address BX_CPU_C::translate_guest_physical(bx_phy_address guest_paddr, bx
}
if (curr_entry & PAGING_EPT_RESERVED_BITS) {
BX_DEBUG(("EPT %s: reserved bit is set %08x:%08x",
bx_paging_level[leaf], GET32H(curr_entry), GET32L(curr_entry)));
BX_DEBUG(("EPT %s: reserved bit is set 0x" FMT_ADDRX64, bx_paging_level[leaf], curr_entry));
vmexit_reason = VMX_VMEXIT_EPT_MISCONFIGURATION;
break;
}
pbase = curr_entry & BX_CONST64(0x000ffffffffff000);
ppf = curr_entry & BX_CONST64(0x000ffffffffff000);
if (leaf == BX_LEVEL_PTE) {
// Make up the physical page frame address.
ppf = (bx_phy_address)(curr_entry & BX_CONST64(0x000ffffffffff000));
break;
}
if (leaf == BX_LEVEL_PTE) break;
if (curr_entry & 0x80) {
if (leaf > (BX_LEVEL_PDE + !!bx_cpuid_support_1g_paging())) {
@@ -1300,29 +1275,16 @@ bx_phy_address BX_CPU_C::translate_guest_physical(bx_phy_address guest_paddr, bx
break;
}
if (leaf == BX_LEVEL_PDPTE) {
if (curr_entry & PAGING_PAE_PDPTE1G_RESERVED_BITS) {
BX_DEBUG(("EPT PDPE1G: reserved bit is set: PDPE=%08x:%08x", GET32H(curr_entry), GET32L(curr_entry)));
vmexit_reason = VMX_VMEXIT_EPT_VIOLATION;
break;
}
// Make up the physical page frame address.
ppf = (bx_phy_address)((curr_entry & BX_CONST64(0x000fffffc0000000)) | (guest_paddr & 0x3ffff000));
break;
ppf &= BX_CONST64(0x000fffffffffe000);
if (ppf & offset_mask) {
BX_DEBUG(("EPT %s: reserved bit is set: 0x" FMT_ADDRX64, bx_paging_level[leaf], curr_entry));
vmexit_reason = VMX_VMEXIT_EPT_VIOLATION;
break;
}
if (leaf == BX_LEVEL_PDE) {
if (curr_entry & PAGING_PAE_PDE2M_RESERVED_BITS) {
BX_DEBUG(("EPT PDE2M: reserved bit is set PDE=%08x:%08x", GET32H(curr_entry), GET32L(curr_entry)));
vmexit_reason = VMX_VMEXIT_EPT_VIOLATION;
break;
}
// Make up the physical page frame address.
ppf = (bx_phy_address)((curr_entry & BX_CONST64(0x000fffffffe00000)) | (guest_paddr & 0x001ff000));
break;
}
// Make up the physical page frame address
ppf += (bx_phy_address)(guest_paddr & offset_mask);
break;
}
}
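Besides applying the same offset_mask check, the EPT hunks above also drop the separate pbase variable: ppf now serves both as the table base for the next read and as the final page frame once a leaf entry is found. A small runnable sketch of that loop shape, with hypothetical names and a fake flat guest-physical memory standing in for access_read_physical():

#include <stdio.h>
#include <stdint.h>

static uint64_t fake_mem[4096];   /* hypothetical flat guest-physical memory */

static uint64_t read_qword(uint64_t paddr) { return fake_mem[paddr / 8]; }

static uint64_t walk_sketch(uint64_t root, uint64_t gpa)
{
  uint64_t frame = root & ~0xfffULL;            /* single walk cursor */
  uint64_t offset_mask = 0x0000ffffffffffffULL;

  for (int leaf = 3;; --leaf) {
    uint64_t entry = read_qword(frame + ((gpa >> (9 + 9*leaf)) & 0xff8));
    offset_mask >>= 9;
    frame = entry & 0x000ffffffffff000ULL;      /* next table, or the final 4K frame */
    if (leaf == 0) break;
    if (entry & 0x80) {                         /* PS bit: 2M or 1G leaf */
      frame &= ~0x1fffULL;                      /* bit 12 is PAT on large pages */
      if (frame & offset_mask) return ~0ULL;    /* reserved frame bits -> misconfig */
      frame += gpa & offset_mask & ~0xfffULL;   /* fold in remaining address bits */
      break;
    }
  }
  return frame | (gpa & 0xfff);
}

int main(void)
{
  /* PML4 at 0x0 points to a PDPT at 0x1000 whose entry 0 is a 1G leaf at 0x40000000 */
  fake_mem[0] = 0x1000 | 0x7;
  fake_mem[0x1000 / 8] = 0x40000000ULL | 0x87;
  printf("0x%llx\n", (unsigned long long) walk_sketch(0, 0x12345678));   /* 0x52345678 */
  return 0;
}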
@@ -1485,8 +1447,6 @@ bx_bool BX_CPU_C::dbg_xlate_linear2phy(bx_address laddr, bx_phy_address *phy, bx
int level = 3;
if (! long_mode()) {
if (! BX_CPU_THIS_PTR PDPTR_CACHE.valid)
goto page_fault;
pt_address = BX_CPU_THIS_PTR PDPTR_CACHE.entry[(laddr >> 30) & 3];
if (! (pt_address & 0x1))
goto page_fault;


@@ -579,8 +579,6 @@ bx_bool BX_CPU_C::SvmEnterLoadCheckGuestState(void)
handleAvxModeChange();
#endif
if (SVM_NESTED_PAGING_ENABLED) BX_CPU_THIS_PTR PDPTR_CACHE.valid = 0;
BX_INSTR_TLB_CNTRL(BX_CPU_ID, BX_INSTR_CONTEXT_SWITCH, 0);
return 1;


@@ -1532,7 +1532,6 @@ Bit32u BX_CPU_C::VMenterLoadCheckGuestState(Bit64u *qualification)
if (vm->vmexec_ctrls3 & VMX_VM_EXEC_CTRL3_EPT_ENABLE) {
// load PDPTR only in PAE legacy mode
if (BX_CPU_THIS_PTR cr0.get_PG() && BX_CPU_THIS_PTR cr4.get_PAE() && !x86_64_guest) {
BX_CPU_THIS_PTR PDPTR_CACHE.valid = 1;
for (n = 0; n < 4; n++)
BX_CPU_THIS_PTR PDPTR_CACHE.entry[n] = guest.pdptr[n];
}
@@ -1778,10 +1777,6 @@ void BX_CPU_C::VMexitSaveGuestState(void)
if (vm->vmexec_ctrls3 & VMX_VM_EXEC_CTRL3_EPT_ENABLE) {
// save only if guest running in legacy PAE mode
if (BX_CPU_THIS_PTR cr0.get_PG() && BX_CPU_THIS_PTR cr4.get_PAE() && !long_mode()) {
if (! BX_CPU_THIS_PTR PDPTR_CACHE.valid) {
if (! CheckPDPTR(BX_CPU_THIS_PTR cr3))
BX_PANIC(("VMEXIT: PDPTR cache is not valid !"));
}
for(n=0; n<4; n++) {
VMwrite64(VMCS_64BIT_GUEST_IA32_PDPTE0 + 2*n, BX_CPU_THIS_PTR PDPTR_CACHE.entry[n]);
}