fixed VMX issue + small code reorg
parent 5a9e4a98ff
commit 25ffaeeea8
@@ -220,7 +220,7 @@ bxICacheEntry_c* BX_CPU_C::getICacheEntry(void)
     eipBiased = RIP + BX_CPU_THIS_PTR eipPageBias;
   }
 
-  bx_phy_address pAddr = BX_CPU_THIS_PTR pAddrPage + eipBiased;
+  bx_phy_address pAddr = BX_CPU_THIS_PTR pAddrFetchPage + eipBiased;
   bxICacheEntry_c *entry = BX_CPU_THIS_PTR iCache.get_entry(pAddr, BX_CPU_THIS_PTR fetchModeMask);
 
   InstrICache_Increment(iCacheLookups);
@@ -553,23 +553,23 @@ void BX_CPU_C::prefetch(void)
   Bit8u *fetchPtr = 0;
 
   if ((tlbEntry->lpf == lpf) && !(tlbEntry->accessBits & (0x4 | USER_PL))) {
-    BX_CPU_THIS_PTR pAddrPage = tlbEntry->ppf;
+    BX_CPU_THIS_PTR pAddrFetchPage = tlbEntry->ppf;
     fetchPtr = (Bit8u*) tlbEntry->hostPageAddr;
   }
   else {
     bx_phy_address pAddr = translate_linear(laddr, USER_PL, BX_EXECUTE);
-    BX_CPU_THIS_PTR pAddrPage = PPFOf(pAddr);
+    BX_CPU_THIS_PTR pAddrFetchPage = PPFOf(pAddr);
   }
 
   if (fetchPtr) {
     BX_CPU_THIS_PTR eipFetchPtr = fetchPtr;
   }
   else {
-    BX_CPU_THIS_PTR eipFetchPtr = (const Bit8u*) getHostMemAddr(BX_CPU_THIS_PTR pAddrPage, BX_EXECUTE);
+    BX_CPU_THIS_PTR eipFetchPtr = (const Bit8u*) getHostMemAddr(BX_CPU_THIS_PTR pAddrFetchPage, BX_EXECUTE);
 
     // Sanity checks
     if (! BX_CPU_THIS_PTR eipFetchPtr) {
-      bx_phy_address pAddr = BX_CPU_THIS_PTR pAddrPage + pageOffset;
+      bx_phy_address pAddr = BX_CPU_THIS_PTR pAddrFetchPage + pageOffset;
       if (pAddr >= BX_MEM(0)->get_memory_len()) {
         BX_PANIC(("prefetch: running in bogus memory, pAddr=0x" FMT_PHY_ADDRX, pAddr));
       }
@@ -1117,7 +1117,7 @@ public: // for now...
   bx_address eipPageBias;
   Bit32u eipPageWindowSize;
   const Bit8u *eipFetchPtr;
-  bx_phy_address pAddrPage; // Guest physical address of current instruction page
+  bx_phy_address pAddrFetchPage; // Guest physical address of current instruction page
 
 #if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
   unsigned alignment_check_mask;
@@ -4207,8 +4207,6 @@ public: // for now...
   BX_SMF BX_CPP_INLINE bx_bool long64_mode(void);
   BX_SMF BX_CPP_INLINE unsigned get_cpu_mode(void);
 
-#define StackAddrSize64() long64_mode()
-
 #if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
   BX_SMF BX_CPP_INLINE bx_bool alignment_check(void);
 #endif
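The "small code reorg" half of the commit starts here: the removed StackAddrSize64() macro was only a synonym for the long64_mode() accessor, so the later hunks substitute the accessor at every former macro site. A minimal sketch of the equivalence, with the accessor body assumed from the usual Bochs definition (illustration only, not part of the patch):

    // Removed macro:  #define StackAddrSize64() long64_mode()
    // Assumed shape of the accessor the call sites now use directly:
    BX_CPP_INLINE bx_bool BX_CPU_C::long64_mode(void)
    {
      // 64-bit long mode is the only mode where the stack address size is always 64
      return (get_cpu_mode() == BX_MODE_LONG_64);
    }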
@@ -123,14 +123,14 @@ bxICacheEntry_c* BX_CPU_C::serveICacheMiss(bxICacheEntry_c *entry, Bit32u eipBia
     entry->pAddr = ~entry->pAddr;
     entry->traceMask = 0x80000000; /* last line in page */
     pageWriteStampTable.markICacheMask(entry->pAddr, entry->traceMask);
-    pageWriteStampTable.markICacheMask(BX_CPU_THIS_PTR pAddrPage, 0x1);
+    pageWriteStampTable.markICacheMask(BX_CPU_THIS_PTR pAddrFetchPage, 0x1);
 
 #if BX_SUPPORT_HANDLERS_CHAINING_SPEEDUPS
     entry->tlen++; /* Add the inserted end of trace opcode */
     genDummyICacheEntry(++i, &BX_CPU_C::BxEndTrace);
 #endif
 
-    BX_CPU_THIS_PTR iCache.commit_page_split_trace(BX_CPU_THIS_PTR pAddrPage, entry);
+    BX_CPU_THIS_PTR iCache.commit_page_split_trace(BX_CPU_THIS_PTR pAddrFetchPage, entry);
     return entry;
   }
 
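The pAddrPage -> pAddrFetchPage rename in this hunk and in the getICacheEntry/prefetch hunks above only renames the cached guest-physical base of the page instructions are currently being fetched from, presumably to distinguish it from other per-page physical addresses the CPU tracks. Pieced together from the hunks, the member's life cycle looks roughly like this (condensed sketch, not the full Bochs code):

    // prefetch(): resolve the current fetch page and cache its guest-physical base
    BX_CPU_THIS_PTR pAddrFetchPage = PPFOf(translate_linear(laddr, USER_PL, BX_EXECUTE));

    // getICacheEntry(): rebuild the instruction's physical address from the cached page base
    bx_phy_address pAddr = BX_CPU_THIS_PTR pAddrFetchPage + eipBiased;
    bxICacheEntry_c *entry = BX_CPU_THIS_PTR iCache.get_entry(pAddr, BX_CPU_THIS_PTR fetchModeMask);

    // serveICacheMiss(): page-split traces are registered against the same fetch page
    BX_CPU_THIS_PTR iCache.commit_page_split_trace(BX_CPU_THIS_PTR pAddrFetchPage, entry);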
@@ -341,7 +341,7 @@ BX_CPU_C::long_iret(bxInstruction_c *i)
    * EIP   eSP+0
    */
 
-  if (StackAddrSize64()) temp_RSP = RSP;
+  if (long64_mode()) temp_RSP = RSP;
   else {
     if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.d_b) temp_RSP = ESP;
     else temp_RSP = SP;
@@ -538,7 +538,7 @@ BX_CPU_C::long_iret(bxInstruction_c *i)
       load_null_selector(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS], raw_ss_selector);
     }
 
-    if (StackAddrSize64()) RSP = new_rsp;
+    if (long64_mode()) RSP = new_rsp;
     else {
       if (ss_descriptor.u.segment.d_b) ESP = (Bit32u) new_rsp;
       else SP = (Bit16u) new_rsp;
@@ -46,7 +46,7 @@ BX_CPU_C::return_protected(bxInstruction_c *i, Bit16u pop_bytes)
   /* + 0: IP | + 0: EIP | + 0: RIP */
 
 #if BX_SUPPORT_X86_64
-  if (StackAddrSize64()) temp_RSP = RSP;
+  if (long64_mode()) temp_RSP = RSP;
   else
 #endif
   {
@@ -106,7 +106,7 @@ BX_CPU_C::return_protected(bxInstruction_c *i, Bit16u pop_bytes)
     branch_far64(&cs_selector, &cs_descriptor, return_RIP, CPL);
 
 #if BX_SUPPORT_X86_64
-    if (StackAddrSize64())
+    if (long64_mode())
       RSP += stack_param_offset + pop_bytes;
     else
 #endif
@@ -213,7 +213,7 @@ BX_CPU_C::return_protected(bxInstruction_c *i, Bit16u pop_bytes)
       load_null_selector(&BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS], raw_ss_selector);
     }
 
-    if (StackAddrSize64())
+    if (long64_mode())
      RSP = return_RSP + pop_bytes;
     else
 #endif
@@ -27,9 +27,8 @@
 BX_CPP_INLINE void BX_CPP_AttrRegparmN(1)
 BX_CPU_C::push_16(Bit16u value16)
 {
-  /* must use StackAddrSize, and either RSP, ESP or SP accordingly */
 #if BX_SUPPORT_X86_64
-  if (StackAddrSize64()) {
+  if (long64_mode()) { /* StackAddrSize = 64 */
     write_virtual_word_64(BX_SEG_REG_SS, RSP-2, value16);
     RSP -= 2;
   }
@@ -39,7 +38,7 @@ BX_CPU_C::push_16(Bit16u value16)
     write_virtual_word_32(BX_SEG_REG_SS, (Bit32u) (ESP-2), value16);
     ESP -= 2;
   }
-  else
+  else /* StackAddrSize = 16 */
   {
     write_virtual_word_32(BX_SEG_REG_SS, (Bit16u) (SP-2), value16);
     SP -= 2;
@@ -49,9 +48,8 @@ BX_CPU_C::push_16(Bit16u value16)
 BX_CPP_INLINE void BX_CPP_AttrRegparmN(1)
 BX_CPU_C::push_32(Bit32u value32)
 {
-  /* must use StackAddrSize, and either RSP, ESP or SP accordingly */
 #if BX_SUPPORT_X86_64
-  if (StackAddrSize64()) {
+  if (long64_mode()) { /* StackAddrSize = 64 */
     write_virtual_dword_64(BX_SEG_REG_SS, RSP-4, value32);
     RSP -= 4;
   }
@@ -61,7 +59,7 @@ BX_CPU_C::push_32(Bit32u value32)
     write_virtual_dword_32(BX_SEG_REG_SS, (Bit32u) (ESP-4), value32);
     ESP -= 4;
   }
-  else
+  else /* StackAddrSize = 16 */
   {
     write_virtual_dword_32(BX_SEG_REG_SS, (Bit16u) (SP-4), value32);
     SP -= 4;
@@ -73,6 +71,7 @@ BX_CPU_C::push_32(Bit32u value32)
 BX_CPP_INLINE void BX_CPP_AttrRegparmN(1)
 BX_CPU_C::push_64(Bit64u value64)
 {
+  /* StackAddrSize = 64 */
   write_virtual_qword_64(BX_SEG_REG_SS, RSP-8, value64);
   RSP -= 8;
 }
@@ -84,17 +83,17 @@ BX_CPP_INLINE Bit16u BX_CPU_C::pop_16(void)
   Bit16u value16;
 
 #if BX_SUPPORT_X86_64
-  if (StackAddrSize64()) {
+  if (long64_mode()) { /* StackAddrSize = 64 */
     value16 = read_virtual_word_64(BX_SEG_REG_SS, RSP);
     RSP += 2;
   }
   else
 #endif
-  if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.d_b) {
+  if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.d_b) { /* StackAddrSize = 32 */
     value16 = read_virtual_word_32(BX_SEG_REG_SS, ESP);
     ESP += 2;
   }
-  else {
+  else { /* StackAddrSize = 16 */
     value16 = read_virtual_word_32(BX_SEG_REG_SS, SP);
     SP += 2;
   }
@@ -108,17 +107,17 @@ BX_CPP_INLINE Bit32u BX_CPU_C::pop_32(void)
   Bit32u value32;
 
 #if BX_SUPPORT_X86_64
-  if (StackAddrSize64()) {
+  if (long64_mode()) { /* StackAddrSize = 64 */
     value32 = read_virtual_dword_64(BX_SEG_REG_SS, RSP);
     RSP += 4;
   }
   else
 #endif
-  if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.d_b) {
+  if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.d_b) { /* StackAddrSize = 32 */
     value32 = read_virtual_dword_32(BX_SEG_REG_SS, ESP);
     ESP += 4;
   }
-  else {
+  else { /* StackAddrSize = 16 */
     value32 = read_virtual_dword_32(BX_SEG_REG_SS, SP);
     SP += 4;
   }
@@ -130,6 +129,7 @@ BX_CPP_INLINE Bit32u BX_CPU_C::pop_32(void)
 #if BX_SUPPORT_X86_64
 BX_CPP_INLINE Bit64u BX_CPU_C::pop_64(void)
 {
+  /* StackAddrSize = 64 */
   Bit64u value64 = read_virtual_qword_64(BX_SEG_REG_SS, RSP);
   RSP += 8;
 
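The /* StackAddrSize */ comments added to push_16/push_32/pop_16/pop_32/push_64/pop_64 all encode the same selection rule. Restated as a stand-alone helper (hypothetical, for illustration only; Bochs keeps the logic inline in each stack helper):

    // 64-bit mode always uses RSP; otherwise SS.d_b selects a 32-bit (ESP) or 16-bit (SP) stack.
    BX_CPP_INLINE unsigned BX_CPU_C::stack_addr_size(void)
    {
    #if BX_SUPPORT_X86_64
      if (long64_mode()) return 64;                                  /* StackAddrSize = 64 */
    #endif
      if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.d_b)
        return 32;                                                   /* StackAddrSize = 32 */
      return 16;                                                     /* StackAddrSize = 16 */
    }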
@@ -1841,8 +1841,16 @@ void BX_CPU_C::VMexitSaveGuestState(void)
   if (vm->vmexit_ctrls & VMX_VMEXIT_CTRL1_STORE_PAT_MSR)
     VMwrite64(VMCS_64BIT_GUEST_IA32_PAT, BX_CPU_THIS_PTR msr.pat);
 #if BX_SUPPORT_X86_64
-  if (vm->vmexit_ctrls & VMX_VMEXIT_CTRL1_STORE_EFER_MSR)
+  if (vm->vmexit_ctrls & VMX_VMEXIT_CTRL1_STORE_EFER_MSR) {
     VMwrite64(VMCS_64BIT_GUEST_IA32_EFER, BX_CPU_THIS_PTR efer.get32());
+
+    // store the value of EFER.LMA back into the VMX_VMENTRY_CTRL1_X86_64_GUEST VM-Entry control
+    if (BX_CPU_THIS_PTR efer.get_LMA())
+      vm->vmentry_ctrls |= VMX_VMENTRY_CTRL1_X86_64_GUEST;
+    else
+      vm->vmentry_ctrls &= ~VMX_VMENTRY_CTRL1_X86_64_GUEST;
+    VMwrite32(VMCS_32BIT_CONTROL_VMENTRY_CONTROLS, vm->vmentry_ctrls);
+  }
 #endif
 #endif
 
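The "VMX issue" is the hunk above: when the STORE_EFER_MSR exit control is set, VMexitSaveGuestState() now also mirrors the guest's EFER.LMA into the VMX_VMENTRY_CTRL1_X86_64_GUEST bit and writes the updated VM-entry controls back to the VMCS, so that a later VM entry through the same VMCS agrees with the guest EFER value just saved (this appears to match the architectural behavior where the "IA-32e mode guest" control tracks IA32_EFER.LMA on such exits). Reduced to the essential update, with commentary added (same field names as the hunk):

    // Keep the VM-entry controls consistent with the guest EFER value just saved:
    if (BX_CPU_THIS_PTR efer.get_LMA())
      vm->vmentry_ctrls |= VMX_VMENTRY_CTRL1_X86_64_GUEST;   // guest was in IA-32e mode at VM exit
    else
      vm->vmentry_ctrls &= ~VMX_VMENTRY_CTRL1_X86_64_GUEST;  // guest had left IA-32e mode
    VMwrite32(VMCS_32BIT_CONTROL_VMENTRY_CONTROLS, vm->vmentry_ctrls);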