diff --git a/bochs/cpu/access.cc b/bochs/cpu/access.cc
index b1010835d..344658b8c 100644
--- a/bochs/cpu/access.cc
+++ b/bochs/cpu/access.cc
@@ -1,5 +1,5 @@
 /////////////////////////////////////////////////////////////////////////
-// $Id: access.cc,v 1.119 2008-09-06 17:44:02 sshwarts Exp $
+// $Id: access.cc,v 1.120 2008-09-08 20:47:33 sshwarts Exp $
 /////////////////////////////////////////////////////////////////////////
 //
 // Copyright (C) 2001 MandrakeSoft S.A.
@@ -39,7 +39,7 @@ BX_CPU_C::write_virtual_checks(bx_segment_reg_t *seg, Bit32u offset, unsigned le
 #if BX_SUPPORT_X86_64
   if (BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64) {
     // Mark cache as being OK type for succeeding reads/writes
-    seg->cache.valid |= SegAccessROK | SegAccessWOK | SegAccessROK4G | SegAccessWOK4G;
+    seg->cache.valid |= SegAccessROK | SegAccessWOK;
     return 1;
   }
 #endif
@@ -80,9 +80,6 @@ BX_CPU_C::write_virtual_checks(bx_segment_reg_t *seg, Bit32u offset, unsigned le
         // limit check in other functions, and we don't want the value to roll.
         // Only normal segments (not expand down) are handled this way.
         seg->cache.valid |= SegAccessROK | SegAccessWOK;
-
-        if (seg->cache.u.segment.limit_scaled == 0xffffffff)
-          seg->cache.valid |= SegAccessROK4G | SegAccessWOK4G;
       }
       break;
@@ -114,7 +111,7 @@ BX_CPU_C::read_virtual_checks(bx_segment_reg_t *seg, Bit32u offset, unsigned len
 #if BX_SUPPORT_X86_64
   if (BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64) {
     // Mark cache as being OK type for succeeding reads/writes
-    seg->cache.valid |= SegAccessROK | SegAccessWOK | SegAccessROK4G | SegAccessWOK4G;
+    seg->cache.valid |= SegAccessROK | SegAccessWOK;
     return 1;
   }
 #endif
@@ -144,8 +141,6 @@ BX_CPU_C::read_virtual_checks(bx_segment_reg_t *seg, Bit32u offset, unsigned len
       // Mark cache as being OK type for succeeding reads. See notes for
       // write checks; similar code.
       seg->cache.valid |= SegAccessROK;
-      if (seg->cache.u.segment.limit_scaled == 0xffffffff)
-        seg->cache.valid |= SegAccessROK4G;
     }
     break;
@@ -184,7 +179,7 @@ BX_CPU_C::execute_virtual_checks(bx_segment_reg_t *seg, Bit32u offset, unsigned
 #if BX_SUPPORT_X86_64
   if (BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64) {
     // Mark cache as being OK type for succeeding reads/writes
-    seg->cache.valid |= SegAccessROK | SegAccessWOK | SegAccessROK4G | SegAccessWOK4G;
+    seg->cache.valid |= SegAccessROK | SegAccessWOK;
     return 1;
   }
 #endif
@@ -214,8 +209,6 @@ BX_CPU_C::execute_virtual_checks(bx_segment_reg_t *seg, Bit32u offset, unsigned
       // Mark cache as being OK type for succeeding reads. See notes for
      // write checks; similar code.
       seg->cache.valid |= SegAccessROK;
-      if (seg->cache.u.segment.limit_scaled == 0xffffffff)
-        seg->cache.valid |= SegAccessROK4G;
     }
     break;
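Note on the access.cc hunks above: the checks routines used to set the extra SegAccessROK4G/SegAccessWOK4G bits whenever a segment's scaled limit was exactly 0xffffffff (and unconditionally in 64-bit mode), so the accessors could skip limit checking entirely for flat 4G segments. This commit drops those bits; only SegAccessROK/SegAccessWOK remain, and every accessor now performs the limit comparison inline. A minimal sketch of the resulting fast-path gate (the helper name is hypothetical, not part of the patch):

    // Sketch only: what a valid cache buys after this patch.  For an
    // N-byte write the last byte, offset + N - 1, must stay within the
    // scaled limit; the word-sized hunks below use the equivalent
    // "offset < limit_scaled" form.
    static bool write_fast_path_ok(const bx_segment_reg_t *seg,
                                   Bit32u offset, unsigned N)
    {
      return (seg->cache.valid & SegAccessWOK) &&
             (offset <= seg->cache.u.segment.limit_scaled - (N - 1));
    }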
diff --git a/bochs/cpu/access32.cc b/bochs/cpu/access32.cc
index ec7b8d6f3..33e2d8486 100755
--- a/bochs/cpu/access32.cc
+++ b/bochs/cpu/access32.cc
@@ -1,5 +1,5 @@
 /////////////////////////////////////////////////////////////////////////
-// $Id: access32.cc,v 1.15 2008-09-08 15:45:56 sshwarts Exp $
+// $Id: access32.cc,v 1.16 2008-09-08 20:47:33 sshwarts Exp $
 /////////////////////////////////////////////////////////////////////////
 //
 // Copyright (c) 2008 Stanislav Shwartsman
@@ -35,38 +35,39 @@ BX_CPU_C::write_virtual_byte_32(unsigned s, Bit32u offset, Bit8u data)
 
   BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);
 
-  if (seg->cache.valid & SegAccessWOK4G) {
-accessOK:
-    laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
-#if BX_SupportGuest2HostTLB
-    unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 0);
-    Bit32u lpf = LPFOf(laddr);
-    bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
-    if (tlbEntry->lpf == lpf) {
-      // See if the TLB entry privilege level allows us write access
-      // from this CPL.
-      if (! (tlbEntry->accessBits & (0x2 | USER_PL))) {
-        bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
-        Bit32u pageOffset = PAGE_OFFSET(laddr);
-        BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 1, BX_WRITE);
-        BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
-            tlbEntry->ppf | pageOffset, 1, CPL, BX_WRITE, (Bit8u*) &data);
-        Bit8u *hostAddr = (Bit8u*) (hostPageAddr | pageOffset);
-#if BX_SUPPORT_ICACHE
-        pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
-#endif
-        *hostAddr = data;
-        return;
-      }
-    }
-#endif
-    access_write_linear(laddr, 1, CPL, (void *) &data);
-    return;
-  }
-
   if (seg->cache.valid & SegAccessWOK) {
-    if (offset <= seg->cache.u.segment.limit_scaled)
-      goto accessOK;
+    if (offset <= seg->cache.u.segment.limit_scaled) {
+accessOK:
+      laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
+#if BX_SupportGuest2HostTLB
+      unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 0);
+      Bit32u lpf = LPFOf(laddr);
+      bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
+      if (tlbEntry->lpf == lpf) {
+        // See if the TLB entry privilege level allows us write access
+        // from this CPL.
+        if (! (tlbEntry->accessBits & (0x2 | USER_PL))) {
+          bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
+          Bit32u pageOffset = PAGE_OFFSET(laddr);
+          BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 1, BX_WRITE);
+          BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
+              tlbEntry->ppf | pageOffset, 1, CPL, BX_WRITE, (Bit8u*) &data);
+          Bit8u *hostAddr = (Bit8u*) (hostPageAddr | pageOffset);
+#if BX_SUPPORT_ICACHE
+          pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
+#endif
+          *hostAddr = data;
+          return;
+        }
+      }
+#endif
+      access_write_linear(laddr, 1, CPL, (void *) &data);
+      return;
+    }
+    else {
+      BX_ERROR(("write_virtual_byte_32(): segment limit violation"));
+      exception(int_number(s), 0, 0);
+    }
   }
 
   if (!write_virtual_checks(seg, offset, 1))
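The same restructuring repeats for every accessor in this file, so it is worth spelling out once. The accessOK: label survives as the landing point for the slow path: when the cached flags are not valid, the checks routine runs and, on success, control jumps back into the fast path. Skeleton (illustrative; the two lines after the trailing context above are reconstructed from the surrounding file, not part of this hunk):

    if (seg->cache.valid & SegAccessWOK) {
      if (offset <= seg->cache.u.segment.limit_scaled) {
    accessOK:
        // guest-to-host TLB fast path, else access_write_linear()
        return;
      }
      else {
        BX_ERROR(("write_virtual_byte_32(): segment limit violation"));
        exception(int_number(s), 0, 0);  // #GP, or #SS for stack refs
      }
    }
    if (!write_virtual_checks(seg, offset, 1))
      exception(int_number(s), 0, 0);
    goto accessOK;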
@@ -83,58 +84,53 @@ BX_CPU_C::write_virtual_word_32(unsigned s, Bit32u offset, Bit16u data)
 
   BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);
 
-  if (seg->cache.valid & SegAccessWOK4G) {
+  if (seg->cache.valid & SegAccessWOK) {
+    if (offset < seg->cache.u.segment.limit_scaled) {
 accessOK:
-    laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
+      laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
 #if BX_SupportGuest2HostTLB
-    unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 1);
+      unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 1);
 #if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
-    Bit32u lpf = AlignedAccessLPFOf(laddr, (1 & BX_CPU_THIS_PTR alignment_check_mask));
+      Bit32u lpf = AlignedAccessLPFOf(laddr, (1 & BX_CPU_THIS_PTR alignment_check_mask));
 #else
-    Bit32u lpf = LPFOf(laddr);
+      Bit32u lpf = LPFOf(laddr);
 #endif
-    bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
-    if (tlbEntry->lpf == lpf) {
-      // See if the TLB entry privilege level allows us write access
-      // from this CPL.
-      if (! (tlbEntry->accessBits & (0x2 | USER_PL))) {
-        bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
-        Bit32u pageOffset = PAGE_OFFSET(laddr);
-        BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 2, BX_WRITE);
-        BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
-            tlbEntry->ppf | pageOffset, 2, CPL, BX_WRITE, (Bit8u*) &data);
-        Bit16u *hostAddr = (Bit16u*) (hostPageAddr | pageOffset);
+      bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
+      if (tlbEntry->lpf == lpf) {
+        // See if the TLB entry privilege level allows us write access
+        // from this CPL.
+        if (! (tlbEntry->accessBits & (0x2 | USER_PL))) {
+          bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
+          Bit32u pageOffset = PAGE_OFFSET(laddr);
+          BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 2, BX_WRITE);
+          BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
+              tlbEntry->ppf | pageOffset, 2, CPL, BX_WRITE, (Bit8u*) &data);
+          Bit16u *hostAddr = (Bit16u*) (hostPageAddr | pageOffset);
 #if BX_SUPPORT_ICACHE
-        pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
+          pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
 #endif
-        WriteHostWordToLittleEndian(hostAddr, data);
-        return;
+          WriteHostWordToLittleEndian(hostAddr, data);
+          return;
+        }
       }
-    }
 #endif
-    // missed 4G limit check
-    if (offset == 0xffffffff) {
-      BX_ERROR(("write_virtual_word_32(): 4G segment limit violation"));
-      exception(int_number(s), 0, 0);
-    }
-
 #if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
-    if (BX_CPU_THIS_PTR alignment_check()) {
-      if (laddr & 1) {
-        BX_ERROR(("write_virtual_word_32(): #AC misaligned access"));
-        exception(BX_AC_EXCEPTION, 0, 0);
+      if (BX_CPU_THIS_PTR alignment_check()) {
+        if (laddr & 1) {
+          BX_ERROR(("write_virtual_word_32(): #AC misaligned access"));
+          exception(BX_AC_EXCEPTION, 0, 0);
+        }
       }
-    }
 #endif
-    access_write_linear(laddr, 2, CPL, (void *) &data);
-    return;
-  }
-
-  if (seg->cache.valid & SegAccessWOK) {
-    if (offset < seg->cache.u.segment.limit_scaled)
-      goto accessOK;
+      access_write_linear(laddr, 2, CPL, (void *) &data);
+      return;
+    }
+    else {
+      BX_ERROR(("write_virtual_word_32(): segment limit violation"));
+      exception(int_number(s), 0, 0);
+    }
   }
 
   if (!write_virtual_checks(seg, offset, 2))
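The limit comparisons differ per access width but encode one condition: the last byte of the access, offset + N - 1, must not exceed limit_scaled. With Bit32u arithmetic the hunks write it without overflow as (summary, from the comparisons visible in this file):

    // N == 1:  offset <= limit_scaled
    // N == 2:  offset <  limit_scaled        (== offset <= limit_scaled - 1)
    // N == 4:  offset <  limit_scaled - 2    (== offset <= limit_scaled - 3)
    // N == 8:  offset <= limit_scaled - 7
    // N == 16: offset <= limit_scaled - 15

This up-front test is also why the old "missed 4G limit check" blocks (such as the removed offset == 0xffffffff test above) disappear: a wrap-around access at the top of a flat 4G segment now fails the limit comparison before the TLB probe instead of after it.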
@@ -151,58 +147,53 @@ BX_CPU_C::write_virtual_dword_32(unsigned s, Bit32u offset, Bit32u data)
 
   BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);
 
-  if (seg->cache.valid & SegAccessWOK4G) {
+  if (seg->cache.valid & SegAccessWOK) {
+    if (offset < (seg->cache.u.segment.limit_scaled-2)) {
 accessOK:
-    laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
+      laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
 #if BX_SupportGuest2HostTLB
-    unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 3);
+      unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 3);
 #if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
-    Bit32u lpf = AlignedAccessLPFOf(laddr, (3 & BX_CPU_THIS_PTR alignment_check_mask));
+      Bit32u lpf = AlignedAccessLPFOf(laddr, (3 & BX_CPU_THIS_PTR alignment_check_mask));
 #else
-    Bit32u lpf = LPFOf(laddr);
+      Bit32u lpf = LPFOf(laddr);
 #endif
-    bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
-    if (tlbEntry->lpf == lpf) {
-      // See if the TLB entry privilege level allows us write access
-      // from this CPL.
-      if (! (tlbEntry->accessBits & (0x2 | USER_PL))) {
-        bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
-        Bit32u pageOffset = PAGE_OFFSET(laddr);
-        BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 4, BX_WRITE);
-        BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
-            tlbEntry->ppf | pageOffset, 4, CPL, BX_WRITE, (Bit8u*) &data);
-        Bit32u *hostAddr = (Bit32u*) (hostPageAddr | pageOffset);
+      bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
+      if (tlbEntry->lpf == lpf) {
+        // See if the TLB entry privilege level allows us write access
+        // from this CPL.
+        if (! (tlbEntry->accessBits & (0x2 | USER_PL))) {
+          bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
+          Bit32u pageOffset = PAGE_OFFSET(laddr);
+          BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 4, BX_WRITE);
+          BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
+              tlbEntry->ppf | pageOffset, 4, CPL, BX_WRITE, (Bit8u*) &data);
+          Bit32u *hostAddr = (Bit32u*) (hostPageAddr | pageOffset);
 #if BX_SUPPORT_ICACHE
-        pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
+          pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
 #endif
-        WriteHostDWordToLittleEndian(hostAddr, data);
-        return;
+          WriteHostDWordToLittleEndian(hostAddr, data);
+          return;
+        }
       }
-    }
 #endif
-    // missed 4G limit check
-    if (offset >= 0xfffffffd) {
-      BX_ERROR(("write_virtual_dword_32(): 4G segment limit violation"));
-      exception(int_number(s), 0, 0);
-    }
-
 #if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
-    if (BX_CPU_THIS_PTR alignment_check()) {
-      if (laddr & 3) {
-        BX_ERROR(("write_virtual_dword_32(): #AC misaligned access"));
-        exception(BX_AC_EXCEPTION, 0, 0);
+      if (BX_CPU_THIS_PTR alignment_check()) {
+        if (laddr & 3) {
+          BX_ERROR(("write_virtual_dword_32(): #AC misaligned access"));
+          exception(BX_AC_EXCEPTION, 0, 0);
+        }
       }
-    }
 #endif
-    access_write_linear(laddr, 4, CPL, (void *) &data);
-    return;
-  }
-
-  if (seg->cache.valid & SegAccessWOK) {
-    if (offset < (seg->cache.u.segment.limit_scaled-2))
-      goto accessOK;
+      access_write_linear(laddr, 4, CPL, (void *) &data);
+      return;
+    }
+    else {
+      BX_ERROR(("write_virtual_dword_32(): segment limit violation"));
+      exception(int_number(s), 0, 0);
+    }
   }
 
   if (!write_virtual_checks(seg, offset, 4))
@@ -219,58 +210,53 @@ BX_CPU_C::write_virtual_qword_32(unsigned s, Bit32u offset, Bit64u data)
 
   BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);
 
-  if (seg->cache.valid & SegAccessWOK4G) {
+  if (seg->cache.valid & SegAccessWOK) {
+    if (offset <= (seg->cache.u.segment.limit_scaled-7)) {
 accessOK:
-    laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
+      laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
 #if BX_SupportGuest2HostTLB
-    unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 7);
+      unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 7);
 #if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
-    Bit32u lpf = AlignedAccessLPFOf(laddr, (7 & BX_CPU_THIS_PTR alignment_check_mask));
+      Bit32u lpf = AlignedAccessLPFOf(laddr, (7 & BX_CPU_THIS_PTR alignment_check_mask));
 #else
-    Bit32u lpf = LPFOf(laddr);
+      Bit32u lpf = LPFOf(laddr);
 #endif
-    bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
-    if (tlbEntry->lpf == lpf) {
-      // See if the TLB entry privilege level allows us write access
-      // from this CPL.
-      if (! (tlbEntry->accessBits & (0x2 | USER_PL))) {
-        bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
-        Bit32u pageOffset = PAGE_OFFSET(laddr);
-        BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 8, BX_WRITE);
-        BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
-            tlbEntry->ppf | pageOffset, 8, CPL, BX_WRITE, (Bit8u*) &data);
-        Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
+      bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
+      if (tlbEntry->lpf == lpf) {
+        // See if the TLB entry privilege level allows us write access
+        // from this CPL.
+        if (! (tlbEntry->accessBits & (0x2 | USER_PL))) {
+          bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
+          Bit32u pageOffset = PAGE_OFFSET(laddr);
+          BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 8, BX_WRITE);
+          BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
+              tlbEntry->ppf | pageOffset, 8, CPL, BX_WRITE, (Bit8u*) &data);
+          Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
 #if BX_SUPPORT_ICACHE
-        pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
+          pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
 #endif
-        WriteHostQWordToLittleEndian(hostAddr, data);
-        return;
+          WriteHostQWordToLittleEndian(hostAddr, data);
+          return;
+        }
       }
-    }
 #endif
-    // missed 4G limit check
-    if (offset >= 0xfffffff8) {
-      BX_ERROR(("write_virtual_qword_32(): 4G segment limit violation"));
-      exception(int_number(s), 0, 0);
-    }
-
 #if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
-    if (BX_CPU_THIS_PTR alignment_check()) {
-      if (laddr & 7) {
-        BX_ERROR(("write_virtual_qword_32(): #AC misaligned access"));
-        exception(BX_AC_EXCEPTION, 0, 0);
+      if (BX_CPU_THIS_PTR alignment_check()) {
+        if (laddr & 7) {
+          BX_ERROR(("write_virtual_qword_32(): #AC misaligned access"));
+          exception(BX_AC_EXCEPTION, 0, 0);
+        }
       }
-    }
 #endif
-    access_write_linear(laddr, 8, CPL, (void *) &data);
-    return;
-  }
-
-  if (seg->cache.valid & SegAccessWOK) {
-    if (offset <= (seg->cache.u.segment.limit_scaled-7))
-      goto accessOK;
+      access_write_linear(laddr, 8, CPL, (void *) &data);
+      return;
+    }
+    else {
+      BX_ERROR(("write_virtual_qword_32(): segment limit violation"));
+      exception(int_number(s), 0, 0);
+    }
   }
 
   if (!write_virtual_checks(seg, offset, 8))
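A concrete case of the behavioral difference: with a flat data segment (limit_scaled == 0xffffffff), a word write at offset 0xffffffff would wrap around the 4G boundary. Previously the SegAccessWOK4G path reached the TLB code first and only then ran the "missed 4G limit check"; after this patch the offset < limit_scaled test rejects it immediately. Illustrative call (BX_SEG_REG_DS is the usual data-segment index in these sources):

    // Raises #GP(0) via exception(int_number(s), 0, 0) in the new
    // else branch, before any TLB or linear-address work is done:
    write_virtual_word_32(BX_SEG_REG_DS, 0xffffffff, 0x1234);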
@@ -287,59 +273,54 @@ BX_CPU_C::write_virtual_dqword_32(unsigned s, Bit32u offset, const BxPackedXmmRe
 
   BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);
 
-  if (seg->cache.valid & SegAccessWOK4G) {
+  if (seg->cache.valid & SegAccessWOK) {
+    if (offset <= (seg->cache.u.segment.limit_scaled-15)) {
 accessOK:
-    laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
+      laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
 #if BX_SupportGuest2HostTLB
-    unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 15);
+      unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 15);
 #if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
-    Bit32u lpf = AlignedAccessLPFOf(laddr, (15 & BX_CPU_THIS_PTR alignment_check_mask));
+      Bit32u lpf = AlignedAccessLPFOf(laddr, (15 & BX_CPU_THIS_PTR alignment_check_mask));
 #else
-    Bit32u lpf = LPFOf(laddr);
+      Bit32u lpf = LPFOf(laddr);
 #endif
-    bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
-    if (tlbEntry->lpf == lpf) {
-      // See if the TLB entry privilege level allows us write access
-      // from this CPL.
-      if (! (tlbEntry->accessBits & (0x2 | USER_PL))) {
-        bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
-        Bit32u pageOffset = PAGE_OFFSET(laddr);
-        BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 16, BX_WRITE);
-        BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
+      bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
+      if (tlbEntry->lpf == lpf) {
+        // See if the TLB entry privilege level allows us write access
+        // from this CPL.
+        if (! (tlbEntry->accessBits & (0x2 | USER_PL))) {
+          bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
+          Bit32u pageOffset = PAGE_OFFSET(laddr);
+          BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 16, BX_WRITE);
+          BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
             tlbEntry->ppf | pageOffset, 16, CPL, BX_WRITE, (Bit8u*) data);
-        Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
+          Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
 #if BX_SUPPORT_ICACHE
-        pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
+          pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
 #endif
-        WriteHostQWordToLittleEndian(hostAddr, data->xmm64u(0));
-        WriteHostQWordToLittleEndian(hostAddr+1, data->xmm64u(1));
-        return;
+          WriteHostQWordToLittleEndian(hostAddr, data->xmm64u(0));
+          WriteHostQWordToLittleEndian(hostAddr+1, data->xmm64u(1));
+          return;
+        }
       }
-    }
 #endif
-    // missed 4G limit check
-    if (offset >= 0xffffffea) {
-      BX_ERROR(("write_virtual_dqword_32(): 4G segment limit violation"));
-      exception(int_number(s), 0, 0);
-    }
-
 #if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
-    if (BX_CPU_THIS_PTR alignment_check()) {
-      if (laddr & 15) {
-        BX_ERROR(("write_virtual_dqword_32(): #AC misaligned access"));
-        exception(BX_AC_EXCEPTION, 0, 0);
+      if (BX_CPU_THIS_PTR alignment_check()) {
+        if (laddr & 15) {
+          BX_ERROR(("write_virtual_dqword_32(): #AC misaligned access"));
+          exception(BX_AC_EXCEPTION, 0, 0);
+        }
       }
-    }
 #endif
-    access_write_linear(laddr, 16, CPL, (void *) data);
-    return;
-  }
-
-  if (seg->cache.valid & SegAccessWOK) {
-    if (offset <= (seg->cache.u.segment.limit_scaled-15))
-      goto accessOK;
+      access_write_linear(laddr, 16, CPL, (void *) data);
+      return;
+    }
+    else {
+      BX_ERROR(("write_virtual_dqword_32(): segment limit violation"));
+      exception(int_number(s), 0, 0);
+    }
   }
 
   if (!write_virtual_checks(seg, offset, 16))
@@ -356,43 +337,44 @@ BX_CPU_C::write_virtual_dqword_aligned_32(unsigned s, Bit32u offset, const BxPac
 
   BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);
 
-  if (seg->cache.valid & SegAccessWOK4G) {
-accessOK:
-    laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
-#if BX_SupportGuest2HostTLB
-    unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 15);
-    Bit32u lpf = AlignedAccessLPFOf(laddr, 15);
-    bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
-    if (tlbEntry->lpf == lpf) {
-      // See if the TLB entry privilege level allows us write access
-      // from this CPL.
-      if (! (tlbEntry->accessBits & (0x2 | USER_PL))) {
-        bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
-        Bit32u pageOffset = PAGE_OFFSET(laddr);
-        BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 16, BX_WRITE);
-        BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
-            tlbEntry->ppf | pageOffset, 16, CPL, BX_WRITE, (Bit8u*) data);
-        Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
-#if BX_SUPPORT_ICACHE
-        pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
-#endif
-        WriteHostQWordToLittleEndian(hostAddr, data->xmm64u(0));
-        WriteHostQWordToLittleEndian(hostAddr+1, data->xmm64u(1));
-        return;
-      }
-    }
-#endif
-    if (laddr & 15) {
-      BX_ERROR(("write_virtual_dqword_aligned_32(): #GP misaligned access"));
-      exception(BX_GP_EXCEPTION, 0, 0);
-    }
-    access_write_linear(laddr, 16, CPL, (void *) data);
-    return;
-  }
-
   if (seg->cache.valid & SegAccessWOK) {
-    if (offset <= (seg->cache.u.segment.limit_scaled-15))
-      goto accessOK;
+    if (offset <= (seg->cache.u.segment.limit_scaled-15)) {
+accessOK:
+      laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
+#if BX_SupportGuest2HostTLB
+      unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 15);
+      Bit32u lpf = AlignedAccessLPFOf(laddr, 15);
+      bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
+      if (tlbEntry->lpf == lpf) {
+        // See if the TLB entry privilege level allows us write access
+        // from this CPL.
+        if (! (tlbEntry->accessBits & (0x2 | USER_PL))) {
+          bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
+          Bit32u pageOffset = PAGE_OFFSET(laddr);
+          BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 16, BX_WRITE);
+          BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
+              tlbEntry->ppf | pageOffset, 16, CPL, BX_WRITE, (Bit8u*) data);
+          Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
+#if BX_SUPPORT_ICACHE
+          pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
+#endif
+          WriteHostQWordToLittleEndian(hostAddr, data->xmm64u(0));
+          WriteHostQWordToLittleEndian(hostAddr+1, data->xmm64u(1));
+          return;
+        }
+      }
+#endif
+      if (laddr & 15) {
+        BX_ERROR(("write_virtual_dqword_aligned_32(): #GP misaligned access"));
+        exception(BX_GP_EXCEPTION, 0, 0);
+      }
+      access_write_linear(laddr, 16, CPL, (void *) data);
+      return;
+    }
+    else {
+      BX_ERROR(("write_virtual_dqword_aligned_32(): segment limit violation"));
+      exception(int_number(s), 0, 0);
+    }
   }
 
   if (!write_virtual_checks(seg, offset, 16))
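The *_aligned_32 variants keep SSE alignment semantics: a misaligned 16-byte access raises #GP rather than #AC, and the check is unconditional instead of gated on alignment_check(). The fast path needs no separate test because AlignedAccessLPFOf() folds the low four address bits into the page-frame comparison, so a misaligned laddr can never match the TLB entry and always falls through to the explicit test (sketch of the two lines involved, from the hunk above):

    Bit32u lpf = AlignedAccessLPFOf(laddr, 15);  // any of bits 0..3 set
                                                 // => tlbEntry->lpf != lpf
    ...
    if (laddr & 15) {
      BX_ERROR(("write_virtual_dqword_aligned_32(): #GP misaligned access"));
      exception(BX_GP_EXCEPTION, 0, 0);
    }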
@@ -410,35 +392,36 @@ BX_CPU_C::read_virtual_byte_32(unsigned s, Bit32u offset)
 
   BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);
 
-  if (seg->cache.valid & SegAccessROK4G) {
-accessOK:
-    laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
-#if BX_SupportGuest2HostTLB
-    unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 0);
-    Bit32u lpf = LPFOf(laddr);
-    bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
-    if (tlbEntry->lpf == lpf) {
-      // See if the TLB entry privilege level allows us read access
-      // from this CPL.
-      if (! (tlbEntry->accessBits & USER_PL)) { // Read this pl OK.
-        bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
-        Bit32u pageOffset = PAGE_OFFSET(laddr);
-        BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 1, BX_READ);
-        Bit8u *hostAddr = (Bit8u*) (hostPageAddr | pageOffset);
-        data = *hostAddr;
-        BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
-            tlbEntry->ppf | pageOffset, 1, CPL, BX_READ, (Bit8u*) &data);
-        return data;
-      }
-    }
-#endif
-    access_read_linear(laddr, 1, CPL, BX_READ, (void *) &data);
-    return data;
-  }
-
   if (seg->cache.valid & SegAccessROK) {
-    if (offset <= seg->cache.u.segment.limit_scaled)
-      goto accessOK;
+    if (offset <= seg->cache.u.segment.limit_scaled) {
+accessOK:
+      laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
+#if BX_SupportGuest2HostTLB
+      unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 0);
+      Bit32u lpf = LPFOf(laddr);
+      bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
+      if (tlbEntry->lpf == lpf) {
+        // See if the TLB entry privilege level allows us read access
+        // from this CPL.
+        if (! (tlbEntry->accessBits & USER_PL)) { // Read this pl OK.
+          bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
+          Bit32u pageOffset = PAGE_OFFSET(laddr);
+          BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 1, BX_READ);
+          Bit8u *hostAddr = (Bit8u*) (hostPageAddr | pageOffset);
+          data = *hostAddr;
+          BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
+              tlbEntry->ppf | pageOffset, 1, CPL, BX_READ, (Bit8u*) &data);
+          return data;
+        }
+      }
+#endif
+      access_read_linear(laddr, 1, CPL, BX_READ, (void *) &data);
+      return data;
+    }
+    else {
+      BX_ERROR(("read_virtual_byte_32(): segment limit violation"));
+      exception(int_number(s), 0, 0);
+    }
   }
 
   if (!read_virtual_checks(seg, offset, 1))
@@ -456,55 +439,50 @@ BX_CPU_C::read_virtual_word_32(unsigned s, Bit32u offset)
 
   BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);
 
-  if (seg->cache.valid & SegAccessROK4G) {
+  if (seg->cache.valid & SegAccessROK) {
+    if (offset < seg->cache.u.segment.limit_scaled) {
 accessOK:
-    laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
+      laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
 #if BX_SupportGuest2HostTLB
-    unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 1);
+      unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 1);
 #if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
-    Bit32u lpf = AlignedAccessLPFOf(laddr, (1 & BX_CPU_THIS_PTR alignment_check_mask));
+      Bit32u lpf = AlignedAccessLPFOf(laddr, (1 & BX_CPU_THIS_PTR alignment_check_mask));
 #else
-    Bit32u lpf = LPFOf(laddr);
+      Bit32u lpf = LPFOf(laddr);
 #endif
-    bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
-    if (tlbEntry->lpf == lpf) {
-      // See if the TLB entry privilege level allows us read access
-      // from this CPL.
-      if (! (tlbEntry->accessBits & USER_PL)) { // Read this pl OK.
-        bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
-        Bit32u pageOffset = PAGE_OFFSET(laddr);
-        BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 2, BX_READ);
-        Bit16u *hostAddr = (Bit16u*) (hostPageAddr | pageOffset);
-        ReadHostWordFromLittleEndian(hostAddr, data);
-        BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
-            tlbEntry->ppf | pageOffset, 2, CPL, BX_READ, (Bit8u*) &data);
-        return data;
+      bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
+      if (tlbEntry->lpf == lpf) {
+        // See if the TLB entry privilege level allows us read access
+        // from this CPL.
+        if (! (tlbEntry->accessBits & USER_PL)) { // Read this pl OK.
+          bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
+          Bit32u pageOffset = PAGE_OFFSET(laddr);
+          BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 2, BX_READ);
+          Bit16u *hostAddr = (Bit16u*) (hostPageAddr | pageOffset);
+          ReadHostWordFromLittleEndian(hostAddr, data);
+          BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
+              tlbEntry->ppf | pageOffset, 2, CPL, BX_READ, (Bit8u*) &data);
+          return data;
+        }
       }
-    }
 #endif
-    // missed 4G limit check
-    if (offset == 0xffffffff) {
-      BX_ERROR(("read_virtual_word_32(): 4G segment limit violation"));
-      exception(int_number(s), 0, 0);
-    }
-
 #if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
-    if (BX_CPU_THIS_PTR alignment_check()) {
-      if (laddr & 1) {
-        BX_ERROR(("read_virtual_word_32(): #AC misaligned access"));
-        exception(BX_AC_EXCEPTION, 0, 0);
+      if (BX_CPU_THIS_PTR alignment_check()) {
+        if (laddr & 1) {
+          BX_ERROR(("read_virtual_word_32(): #AC misaligned access"));
+          exception(BX_AC_EXCEPTION, 0, 0);
+        }
       }
-    }
 #endif
-    access_read_linear(laddr, 2, CPL, BX_READ, (void *) &data);
-    return data;
-  }
-
-  if (seg->cache.valid & SegAccessROK) {
-    if (offset < seg->cache.u.segment.limit_scaled)
-      goto accessOK;
+      access_read_linear(laddr, 2, CPL, BX_READ, (void *) &data);
+      return data;
+    }
+    else {
+      BX_ERROR(("read_virtual_word_32(): segment limit violation"));
+      exception(int_number(s), 0, 0);
+    }
  }
 
  if (!read_virtual_checks(seg, offset, 2))
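Read accessors gate the host fast path on a weaker condition than writes. USER_PL evaluates to 1 at CPL 3 and 0 otherwise, so the two tests visible in this file decode as (summary, not new code):

    if (! (tlbEntry->accessBits & USER_PL))          // read:  user bit only
    if (! (tlbEntry->accessBits & (0x2 | USER_PL)))  // write: additionally
                                                     // bit 0x2, the entry's
                                                     // write-permission gate

Reads also skip the pageWriteStampTable icache stamping, since no guest memory is modified.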
@@ -522,55 +500,50 @@ BX_CPU_C::read_virtual_dword_32(unsigned s, Bit32u offset)
 
   BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);
 
-  if (seg->cache.valid & SegAccessROK4G) {
+  if (seg->cache.valid & SegAccessROK) {
+    if (offset < (seg->cache.u.segment.limit_scaled-2)) {
 accessOK:
-    laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
+      laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
 #if BX_SupportGuest2HostTLB
-    unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 3);
+      unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 3);
 #if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
-    Bit32u lpf = AlignedAccessLPFOf(laddr, (3 & BX_CPU_THIS_PTR alignment_check_mask));
+      Bit32u lpf = AlignedAccessLPFOf(laddr, (3 & BX_CPU_THIS_PTR alignment_check_mask));
 #else
-    Bit32u lpf = LPFOf(laddr);
+      Bit32u lpf = LPFOf(laddr);
 #endif
-    bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
-    if (tlbEntry->lpf == lpf) {
-      // See if the TLB entry privilege level allows us read access
-      // from this CPL.
-      if (! (tlbEntry->accessBits & USER_PL)) { // Read this pl OK.
-        bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
-        Bit32u pageOffset = PAGE_OFFSET(laddr);
-        BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 4, BX_READ);
-        Bit32u *hostAddr = (Bit32u*) (hostPageAddr | pageOffset);
-        ReadHostDWordFromLittleEndian(hostAddr, data);
-        BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
-            tlbEntry->ppf | pageOffset, 4, CPL, BX_READ, (Bit8u*) &data);
-        return data;
+      bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
+      if (tlbEntry->lpf == lpf) {
+        // See if the TLB entry privilege level allows us read access
+        // from this CPL.
+        if (! (tlbEntry->accessBits & USER_PL)) { // Read this pl OK.
+          bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
+          Bit32u pageOffset = PAGE_OFFSET(laddr);
+          BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 4, BX_READ);
+          Bit32u *hostAddr = (Bit32u*) (hostPageAddr | pageOffset);
+          ReadHostDWordFromLittleEndian(hostAddr, data);
+          BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
+              tlbEntry->ppf | pageOffset, 4, CPL, BX_READ, (Bit8u*) &data);
+          return data;
+        }
       }
-    }
 #endif
-    // missed 4G limit check
-    if (offset >= 0xfffffffd) {
-      BX_ERROR(("read_virtual_dword_32(): 4G segment limit violation"));
-      exception(int_number(s), 0, 0);
-    }
-
 #if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
-    if (BX_CPU_THIS_PTR alignment_check()) {
-      if (laddr & 3) {
-        BX_ERROR(("read_virtual_dword_32(): #AC misaligned access"));
-        exception(BX_AC_EXCEPTION, 0, 0);
+      if (BX_CPU_THIS_PTR alignment_check()) {
+        if (laddr & 3) {
+          BX_ERROR(("read_virtual_dword_32(): #AC misaligned access"));
+          exception(BX_AC_EXCEPTION, 0, 0);
+        }
       }
-    }
 #endif
-    access_read_linear(laddr, 4, CPL, BX_READ, (void *) &data);
-    return data;
-  }
-
-  if (seg->cache.valid & SegAccessROK) {
-    if (offset < (seg->cache.u.segment.limit_scaled-2))
-      goto accessOK;
+      access_read_linear(laddr, 4, CPL, BX_READ, (void *) &data);
+      return data;
+    }
+    else {
+      BX_ERROR(("read_virtual_dword_32(): segment limit violation"));
+      exception(int_number(s), 0, 0);
+    }
   }
 
   if (!read_virtual_checks(seg, offset, 4))
@@ -588,55 +561,50 @@ BX_CPU_C::read_virtual_qword_32(unsigned s, Bit32u offset)
 
   BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);
 
-  if (seg->cache.valid & SegAccessROK4G) {
+  if (seg->cache.valid & SegAccessROK) {
+    if (offset <= (seg->cache.u.segment.limit_scaled-7)) {
 accessOK:
-    laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
+      laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
 #if BX_SupportGuest2HostTLB
-    unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 7);
+      unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 7);
 #if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
-    Bit32u lpf = AlignedAccessLPFOf(laddr, (7 & BX_CPU_THIS_PTR alignment_check_mask));
+      Bit32u lpf = AlignedAccessLPFOf(laddr, (7 & BX_CPU_THIS_PTR alignment_check_mask));
 #else
-    Bit32u lpf = LPFOf(laddr);
+      Bit32u lpf = LPFOf(laddr);
 #endif
-    bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
-    if (tlbEntry->lpf == lpf) {
-      // See if the TLB entry privilege level allows us read access
-      // from this CPL.
-      if (! (tlbEntry->accessBits & USER_PL)) { // Read this pl OK.
-        bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
-        Bit32u pageOffset = PAGE_OFFSET(laddr);
-        BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 8, BX_READ);
-        Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
-        ReadHostQWordFromLittleEndian(hostAddr, data);
-        BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
-            tlbEntry->ppf | pageOffset, 8, CPL, BX_READ, (Bit8u*) &data);
-        return data;
+      bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
+      if (tlbEntry->lpf == lpf) {
+        // See if the TLB entry privilege level allows us read access
+        // from this CPL.
+        if (! (tlbEntry->accessBits & USER_PL)) { // Read this pl OK.
+          bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
+          Bit32u pageOffset = PAGE_OFFSET(laddr);
+          BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 8, BX_READ);
+          Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
+          ReadHostQWordFromLittleEndian(hostAddr, data);
+          BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
+              tlbEntry->ppf | pageOffset, 8, CPL, BX_READ, (Bit8u*) &data);
+          return data;
+        }
       }
-    }
 #endif
-    // missed 4G limit check
-    if (offset >= 0xfffffff8) {
-      BX_ERROR(("read_virtual_qword_32(): 4G segment limit violation"));
-      exception(int_number(s), 0, 0);
-    }
-
 #if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
-    if (BX_CPU_THIS_PTR alignment_check()) {
-      if (laddr & 7) {
-        BX_ERROR(("read_virtual_qword_32(): #AC misaligned access"));
-        exception(BX_AC_EXCEPTION, 0, 0);
+      if (BX_CPU_THIS_PTR alignment_check()) {
+        if (laddr & 7) {
+          BX_ERROR(("read_virtual_qword_32(): #AC misaligned access"));
+          exception(BX_AC_EXCEPTION, 0, 0);
+        }
       }
-    }
 #endif
-    access_read_linear(laddr, 8, CPL, BX_READ, (void *) &data);
-    return data;
-  }
-
-  if (seg->cache.valid & SegAccessROK) {
-    if (offset <= (seg->cache.u.segment.limit_scaled-7))
-      goto accessOK;
+      access_read_linear(laddr, 8, CPL, BX_READ, (void *) &data);
+      return data;
+    }
+    else {
+      BX_ERROR(("read_virtual_qword_32(): segment limit violation"));
+      exception(int_number(s), 0, 0);
    }
  }
 
  if (!read_virtual_checks(seg, offset, 8))
@@ -653,56 +621,51 @@ BX_CPU_C::read_virtual_dqword_32(unsigned s, Bit32u offset, BxPackedXmmRegister
 
   BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);
 
-  if (seg->cache.valid & SegAccessROK4G) {
+  if (seg->cache.valid & SegAccessROK) {
+    if (offset <= (seg->cache.u.segment.limit_scaled-15)) {
 accessOK:
-    laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
+      laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
 #if BX_SupportGuest2HostTLB
-    unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 15);
+      unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 15);
 #if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
-    Bit32u lpf = AlignedAccessLPFOf(laddr, (15 & BX_CPU_THIS_PTR alignment_check_mask));
+      Bit32u lpf = AlignedAccessLPFOf(laddr, (15 & BX_CPU_THIS_PTR alignment_check_mask));
 #else
-    Bit32u lpf = LPFOf(laddr);
+      Bit32u lpf = LPFOf(laddr);
 #endif
-    bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
-    if (tlbEntry->lpf == lpf) {
-      // See if the TLB entry privilege level allows us read access
-      // from this CPL.
-      if (! (tlbEntry->accessBits & USER_PL)) { // Read this pl OK.
-        bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
-        Bit32u pageOffset = PAGE_OFFSET(laddr);
-        BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 16, BX_READ);
-        Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
-        ReadHostQWordFromLittleEndian(hostAddr, data->xmm64u(0));
-        ReadHostQWordFromLittleEndian(hostAddr+1, data->xmm64u(1));
-        BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
-            tlbEntry->ppf | pageOffset, 16, CPL, BX_READ, (Bit8u*) data);
-        return;
+      bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
+      if (tlbEntry->lpf == lpf) {
+        // See if the TLB entry privilege level allows us read access
+        // from this CPL.
+        if (! (tlbEntry->accessBits & USER_PL)) { // Read this pl OK.
+          bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
+          Bit32u pageOffset = PAGE_OFFSET(laddr);
+          BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 16, BX_READ);
+          Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
+          ReadHostQWordFromLittleEndian(hostAddr, data->xmm64u(0));
+          ReadHostQWordFromLittleEndian(hostAddr+1, data->xmm64u(1));
+          BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
+              tlbEntry->ppf | pageOffset, 16, CPL, BX_READ, (Bit8u*) data);
+          return;
+        }
      }
-    }
 #endif
-    // missed 4G limit check
-    if (offset >= 0xffffffea) {
-      BX_ERROR(("read_virtual_dqword_32(): 4G segment limit violation"));
-      exception(int_number(s), 0, 0);
-    }
-
 #if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
-    if (BX_CPU_THIS_PTR alignment_check()) {
-      if (laddr & 15) {
-        BX_ERROR(("read_virtual_dqword_32(): #AC misaligned access"));
-        exception(BX_AC_EXCEPTION, 0, 0);
+      if (BX_CPU_THIS_PTR alignment_check()) {
+        if (laddr & 15) {
+          BX_ERROR(("read_virtual_dqword_32(): #AC misaligned access"));
+          exception(BX_AC_EXCEPTION, 0, 0);
+        }
      }
-    }
 #endif
-    access_read_linear(laddr, 16, CPL, BX_READ, (void *) data);
-    return;
-  }
-
-  if (seg->cache.valid & SegAccessROK) {
-    if (offset <= (seg->cache.u.segment.limit_scaled-15))
-      goto accessOK;
+      access_read_linear(laddr, 16, CPL, BX_READ, (void *) data);
+      return;
+    }
+    else {
+      BX_ERROR(("read_virtual_dqword_32(): segment limit violation"));
+      exception(int_number(s), 0, 0);
+    }
  }
 
  if (!read_virtual_checks(seg, offset, 16))
@@ -719,40 +682,41 @@ BX_CPU_C::read_virtual_dqword_aligned_32(unsigned s, Bit32u offset, BxPackedXmmR
 
   BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);
 
-  if (seg->cache.valid & SegAccessROK4G) {
-accessOK:
-    laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
-#if BX_SupportGuest2HostTLB
-    unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 15);
-    Bit32u lpf = AlignedAccessLPFOf(laddr, 15);
-    bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
-    if (tlbEntry->lpf == lpf) {
-      // See if the TLB entry privilege level allows us read access
-      // from this CPL.
-      if (! (tlbEntry->accessBits & USER_PL)) { // Read this pl OK.
-        bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
-        Bit32u pageOffset = PAGE_OFFSET(laddr);
-        BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 16, BX_READ);
-        Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
-        ReadHostQWordFromLittleEndian(hostAddr, data->xmm64u(0));
-        ReadHostQWordFromLittleEndian(hostAddr+1, data->xmm64u(1));
-        BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
-            tlbEntry->ppf | pageOffset, 16, CPL, BX_READ, (Bit8u*) data);
-        return;
-      }
-    }
-#endif
-    if (laddr & 15) {
-      BX_ERROR(("read_virtual_dqword_aligned_32(): #GP misaligned access"));
-      exception(BX_GP_EXCEPTION, 0, 0);
-    }
-    access_read_linear(laddr, 16, CPL, BX_READ, (void *) data);
-    return;
-  }
-
   if (seg->cache.valid & SegAccessROK) {
-    if (offset <= (seg->cache.u.segment.limit_scaled-15))
-      goto accessOK;
+    if (offset <= (seg->cache.u.segment.limit_scaled-15)) {
+accessOK:
+      laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
+#if BX_SupportGuest2HostTLB
+      unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 15);
+      Bit32u lpf = AlignedAccessLPFOf(laddr, 15);
+      bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
+      if (tlbEntry->lpf == lpf) {
+        // See if the TLB entry privilege level allows us read access
+        // from this CPL.
+        if (! (tlbEntry->accessBits & USER_PL)) { // Read this pl OK.
+          bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
+          Bit32u pageOffset = PAGE_OFFSET(laddr);
+          BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 16, BX_READ);
+          Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
+          ReadHostQWordFromLittleEndian(hostAddr, data->xmm64u(0));
+          ReadHostQWordFromLittleEndian(hostAddr+1, data->xmm64u(1));
+          BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
+              tlbEntry->ppf | pageOffset, 16, CPL, BX_READ, (Bit8u*) data);
+          return;
+        }
+      }
+#endif
+      if (laddr & 15) {
+        BX_ERROR(("read_virtual_dqword_aligned_32(): #GP misaligned access"));
+        exception(BX_GP_EXCEPTION, 0, 0);
+      }
+      access_read_linear(laddr, 16, CPL, BX_READ, (void *) data);
+      return;
+    }
+    else {
+      BX_ERROR(("read_virtual_dqword_aligned_32(): segment limit violation"));
+      exception(int_number(s), 0, 0);
    }
  }
 
  if (!read_virtual_checks(seg, offset, 16))
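The read_RMW_* accessors that follow are the load half of read-modify-write instructions, so they validate *write* access (note the (0x2 | USER_PL) mask, the icache write stamp, and the fall-through to write_virtual_checks()) and cache the resolved host pointer in BX_CPU_THIS_PTR address_xlation.pages for the companion store. Typical usage elsewhere in the interpreter looks roughly like this (illustrative sketch, not code from this patch):

    Bit16u op = read_RMW_virtual_word_32(s, offset); // write-checked load;
                                                     // host pointer cached
    op++;                                            // modify
    write_RMW_virtual_word(op);                      // store through the
                                                     // cached translation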
@@ -775,39 +739,40 @@ BX_CPU_C::read_RMW_virtual_byte_32(unsigned s, Bit32u offset)
 
   BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);
 
-  if (seg->cache.valid & SegAccessWOK4G) {
-accessOK:
-    laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
-#if BX_SupportGuest2HostTLB
-    unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 0);
-    Bit32u lpf = LPFOf(laddr);
-    bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
-    if (tlbEntry->lpf == lpf) {
-      // See if the TLB entry privilege level allows us write access
-      // from this CPL.
-      if (! (tlbEntry->accessBits & (0x2 | USER_PL))) {
-        bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
-        Bit32u pageOffset = PAGE_OFFSET(laddr);
-        BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 1, BX_RW);
-        Bit8u *hostAddr = (Bit8u*) (hostPageAddr | pageOffset);
-#if BX_SUPPORT_ICACHE
-        pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
-#endif
-        data = *hostAddr;
-        BX_CPU_THIS_PTR address_xlation.pages = (bx_ptr_equiv_t) hostAddr;
-        BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
-            tlbEntry->ppf | pageOffset, 1, CPL, BX_READ, (Bit8u*) &data);
-        return data;
-      }
-    }
-#endif
-    access_read_linear(laddr, 1, CPL, BX_RW, (void *) &data);
-    return data;
-  }
-
   if (seg->cache.valid & SegAccessWOK) {
-    if (offset <= seg->cache.u.segment.limit_scaled)
-      goto accessOK;
+    if (offset <= seg->cache.u.segment.limit_scaled) {
+accessOK:
+      laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
+#if BX_SupportGuest2HostTLB
+      unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 0);
+      Bit32u lpf = LPFOf(laddr);
+      bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
+      if (tlbEntry->lpf == lpf) {
+        // See if the TLB entry privilege level allows us write access
+        // from this CPL.
+        if (! (tlbEntry->accessBits & (0x2 | USER_PL))) {
+          bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
+          Bit32u pageOffset = PAGE_OFFSET(laddr);
+          BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 1, BX_RW);
+          Bit8u *hostAddr = (Bit8u*) (hostPageAddr | pageOffset);
+#if BX_SUPPORT_ICACHE
+          pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
+#endif
+          data = *hostAddr;
+          BX_CPU_THIS_PTR address_xlation.pages = (bx_ptr_equiv_t) hostAddr;
+          BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
+              tlbEntry->ppf | pageOffset, 1, CPL, BX_READ, (Bit8u*) &data);
+          return data;
+        }
+      }
+#endif
+      access_read_linear(laddr, 1, CPL, BX_RW, (void *) &data);
+      return data;
+    }
+    else {
+      BX_ERROR(("read_RMW_virtual_byte_32(): segment limit violation"));
+      exception(int_number(s), 0, 0);
+    }
  }
 
  if (!write_virtual_checks(seg, offset, 1))
@@ -825,59 +790,54 @@ BX_CPU_C::read_RMW_virtual_word_32(unsigned s, Bit32u offset)
 
   BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);
 
-  if (seg->cache.valid & SegAccessWOK4G) {
+  if (seg->cache.valid & SegAccessWOK) {
+    if (offset < seg->cache.u.segment.limit_scaled) {
 accessOK:
-    laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
+      laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
 #if BX_SupportGuest2HostTLB
-    unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 1);
+      unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 1);
 #if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
-    Bit32u lpf = AlignedAccessLPFOf(laddr, (1 & BX_CPU_THIS_PTR alignment_check_mask));
+      Bit32u lpf = AlignedAccessLPFOf(laddr, (1 & BX_CPU_THIS_PTR alignment_check_mask));
 #else
-    Bit32u lpf = LPFOf(laddr);
+      Bit32u lpf = LPFOf(laddr);
 #endif
-    bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
-    if (tlbEntry->lpf == lpf) {
-      // See if the TLB entry privilege level allows us write access
-      // from this CPL.
-      if (! (tlbEntry->accessBits & (0x2 | USER_PL))) {
-        bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
-        Bit32u pageOffset = PAGE_OFFSET(laddr);
-        BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 2, BX_RW);
-        Bit16u *hostAddr = (Bit16u*) (hostPageAddr | pageOffset);
+      bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
+      if (tlbEntry->lpf == lpf) {
+        // See if the TLB entry privilege level allows us write access
+        // from this CPL.
+        if (! (tlbEntry->accessBits & (0x2 | USER_PL))) {
+          bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
+          Bit32u pageOffset = PAGE_OFFSET(laddr);
+          BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 2, BX_RW);
+          Bit16u *hostAddr = (Bit16u*) (hostPageAddr | pageOffset);
 #if BX_SUPPORT_ICACHE
-        pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
+          pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
 #endif
-        ReadHostWordFromLittleEndian(hostAddr, data);
-        BX_CPU_THIS_PTR address_xlation.pages = (bx_ptr_equiv_t) hostAddr;
-        BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
-            tlbEntry->ppf | pageOffset, 2, CPL, BX_READ, (Bit8u*) &data);
-        return data;
+          ReadHostWordFromLittleEndian(hostAddr, data);
+          BX_CPU_THIS_PTR address_xlation.pages = (bx_ptr_equiv_t) hostAddr;
+          BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
+              tlbEntry->ppf | pageOffset, 2, CPL, BX_READ, (Bit8u*) &data);
+          return data;
+        }
       }
-    }
 #endif
-    // missed 4G limit check
-    if (offset == 0xffffffff) {
-      BX_ERROR(("read_RMW_virtual_word_32(): 4G segment limit violation"));
-      exception(int_number(s), 0, 0);
-    }
-
 #if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
-    if (BX_CPU_THIS_PTR alignment_check()) {
-      if (laddr & 1) {
-        BX_ERROR(("read_RMW_virtual_word_32(): #AC misaligned access"));
-        exception(BX_AC_EXCEPTION, 0, 0);
+      if (BX_CPU_THIS_PTR alignment_check()) {
+        if (laddr & 1) {
+          BX_ERROR(("read_RMW_virtual_word_32(): #AC misaligned access"));
+          exception(BX_AC_EXCEPTION, 0, 0);
+        }
       }
-    }
 #endif
-    access_read_linear(laddr, 2, CPL, BX_RW, (void *) &data);
-    return data;
-  }
-
-  if (seg->cache.valid & SegAccessWOK) {
-    if (offset < seg->cache.u.segment.limit_scaled)
-      goto accessOK;
+      access_read_linear(laddr, 2, CPL, BX_RW, (void *) &data);
+      return data;
+    }
+    else {
+      BX_ERROR(("read_RMW_virtual_word_32(): segment limit violation"));
+      exception(int_number(s), 0, 0);
    }
  }
 
  if (!write_virtual_checks(seg, offset, 2))
@@ -895,59 +855,54 @@ BX_CPU_C::read_RMW_virtual_dword_32(unsigned s, Bit32u offset)
 
   BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);
 
-  if (seg->cache.valid & SegAccessWOK4G) {
+  if (seg->cache.valid & SegAccessWOK) {
+    if (offset < (seg->cache.u.segment.limit_scaled-2)) {
 accessOK:
-    laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
+      laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
 #if BX_SupportGuest2HostTLB
-    unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 3);
+      unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 3);
 #if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
-    Bit32u lpf = AlignedAccessLPFOf(laddr, (3 & BX_CPU_THIS_PTR alignment_check_mask));
+      Bit32u lpf = AlignedAccessLPFOf(laddr, (3 & BX_CPU_THIS_PTR alignment_check_mask));
 #else
-    Bit32u lpf = LPFOf(laddr);
+      Bit32u lpf = LPFOf(laddr);
 #endif
-    bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
-    if (tlbEntry->lpf == lpf) {
-      // See if the TLB entry privilege level allows us write access
-      // from this CPL.
-      if (! (tlbEntry->accessBits & (0x2 | USER_PL))) {
-        bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
-        Bit32u pageOffset = PAGE_OFFSET(laddr);
-        BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 4, BX_RW);
-        Bit32u *hostAddr = (Bit32u*) (hostPageAddr | pageOffset);
+      bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
+      if (tlbEntry->lpf == lpf) {
+        // See if the TLB entry privilege level allows us write access
+        // from this CPL.
+        if (! (tlbEntry->accessBits & (0x2 | USER_PL))) {
+          bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
+          Bit32u pageOffset = PAGE_OFFSET(laddr);
+          BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 4, BX_RW);
+          Bit32u *hostAddr = (Bit32u*) (hostPageAddr | pageOffset);
 #if BX_SUPPORT_ICACHE
-        pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
+          pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
 #endif
-        ReadHostDWordFromLittleEndian(hostAddr, data);
-        BX_CPU_THIS_PTR address_xlation.pages = (bx_ptr_equiv_t) hostAddr;
-        BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
-            tlbEntry->ppf | pageOffset, 4, CPL, BX_READ, (Bit8u*) &data);
-        return data;
+          ReadHostDWordFromLittleEndian(hostAddr, data);
+          BX_CPU_THIS_PTR address_xlation.pages = (bx_ptr_equiv_t) hostAddr;
+          BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
+              tlbEntry->ppf | pageOffset, 4, CPL, BX_READ, (Bit8u*) &data);
+          return data;
+        }
      }
-    }
 #endif
-    // missed 4G limit check
-    if (offset >= 0xfffffffd) {
-      BX_ERROR(("read_RMW_virtual_dword_32(): 4G segment limit violation"));
-      exception(int_number(s), 0, 0);
-    }
-
 #if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
-    if (BX_CPU_THIS_PTR alignment_check()) {
-      if (laddr & 3) {
-        BX_ERROR(("read_RMW_virtual_dword_32(): #AC misaligned access"));
-        exception(BX_AC_EXCEPTION, 0, 0);
+      if (BX_CPU_THIS_PTR alignment_check()) {
+        if (laddr & 3) {
+          BX_ERROR(("read_RMW_virtual_dword_32(): #AC misaligned access"));
+          exception(BX_AC_EXCEPTION, 0, 0);
+        }
      }
-    }
 #endif
-    access_read_linear(laddr, 4, CPL, BX_RW, (void *) &data);
-    return data;
-  }
-
-  if (seg->cache.valid & SegAccessWOK) {
-    if (offset < (seg->cache.u.segment.limit_scaled-2))
-      goto accessOK;
+      access_read_linear(laddr, 4, CPL, BX_RW, (void *) &data);
+      return data;
+    }
+    else {
+      BX_ERROR(("read_RMW_virtual_dword_32(): segment limit violation"));
+      exception(int_number(s), 0, 0);
    }
  }
 
  if (!write_virtual_checks(seg, offset, 4))
@@ -965,59 +920,54 @@ BX_CPU_C::read_RMW_virtual_qword_32(unsigned s, Bit32u offset)
 
   BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);
 
-  if (seg->cache.valid & SegAccessWOK4G) {
+  if (seg->cache.valid & SegAccessWOK) {
+    if (offset <= (seg->cache.u.segment.limit_scaled-7)) {
 accessOK:
-    laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
+      laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
 #if BX_SupportGuest2HostTLB
-    unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 7);
+      unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 7);
 #if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
-    Bit32u lpf = AlignedAccessLPFOf(laddr, (7 & BX_CPU_THIS_PTR alignment_check_mask));
+      Bit32u lpf = AlignedAccessLPFOf(laddr, (7 & BX_CPU_THIS_PTR alignment_check_mask));
 #else
-    Bit32u lpf = LPFOf(laddr);
+      Bit32u lpf = LPFOf(laddr);
 #endif
-    bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
-    if (tlbEntry->lpf == lpf) {
-      // See if the TLB entry privilege level allows us write access
-      // from this CPL.
-      if (! (tlbEntry->accessBits & (0x2 | USER_PL))) {
-        bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
-        Bit32u pageOffset = PAGE_OFFSET(laddr);
-        BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 8, BX_RW);
-        Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
+      bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
+      if (tlbEntry->lpf == lpf) {
+        // See if the TLB entry privilege level allows us write access
+        // from this CPL.
+        if (! (tlbEntry->accessBits & (0x2 | USER_PL))) {
+          bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
+          Bit32u pageOffset = PAGE_OFFSET(laddr);
+          BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 8, BX_RW);
+          Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
 #if BX_SUPPORT_ICACHE
-        pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
+          pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
 #endif
-        ReadHostQWordFromLittleEndian(hostAddr, data);
-        BX_CPU_THIS_PTR address_xlation.pages = (bx_ptr_equiv_t) hostAddr;
-        BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
-            tlbEntry->ppf | pageOffset, 8, CPL, BX_READ, (Bit8u*) &data);
-        return data;
+          ReadHostQWordFromLittleEndian(hostAddr, data);
+          BX_CPU_THIS_PTR address_xlation.pages = (bx_ptr_equiv_t) hostAddr;
+          BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
+              tlbEntry->ppf | pageOffset, 8, CPL, BX_READ, (Bit8u*) &data);
+          return data;
+        }
      }
-    }
 #endif
-    // missed 4G limit check
-    if (offset >= 0xfffffff8) {
-      BX_ERROR(("read_RMW_virtual_qword_32(): 4G segment limit violation"));
-      exception(int_number(s), 0, 0);
-    }
-
 #if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
-    if (BX_CPU_THIS_PTR alignment_check()) {
-      if (laddr & 7) {
-        BX_ERROR(("read_RMW_virtual_qword_32(): #AC misaligned access"));
-        exception(BX_AC_EXCEPTION, 0, 0);
+      if (BX_CPU_THIS_PTR alignment_check()) {
+        if (laddr & 7) {
+          BX_ERROR(("read_RMW_virtual_qword_32(): #AC misaligned access"));
+          exception(BX_AC_EXCEPTION, 0, 0);
        }
      }
-    }
 #endif
-    access_read_linear(laddr, 8, CPL, BX_RW, (void *) &data);
-    return data;
-  }
-
-  if (seg->cache.valid & SegAccessWOK) {
-    if (offset <= (seg->cache.u.segment.limit_scaled-7))
-      goto accessOK;
+      access_read_linear(laddr, 8, CPL, BX_RW, (void *) &data);
+      return data;
+    }
+    else {
+      BX_ERROR(("read_RMW_virtual_qword_32(): segment limit violation"));
+      exception(int_number(s), 0, 0);
    }
  }
 
  if (!write_virtual_checks(seg, offset, 8))
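The hunk numbering jumps here (from around line 965 to 1198): the accessors in between are untouched by this commit. The write_new_stack_* helpers below push to a stack segment that is not yet loaded (for example during a privilege-level change), so they compute laddr from seg->cache.u.segment.base directly, take the target privilege as curr_pl rather than CPL, and report limit violations as #SS. The new else branches choose the #SS error code like so (taken from the hunks below):

    exception(BX_SS_EXCEPTION,
        seg->selector.rpl != CPL ? (seg->selector.value & 0xfffc) : 0, 0);
    // new stack selector (RPL bits masked) on an inter-privilege push,
    // error code 0 on a same-privilege push

This also supersedes the removed 4G-path code in the word and dword variants, which always pushed seg->selector.value & 0xfffc.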
@@ -1198,59 +1148,55 @@ void BX_CPU_C::write_new_stack_word_32(bx_segment_reg_t *seg, Bit32u offset, uns
 {
   Bit32u laddr;
 
-  if (seg->cache.valid & SegAccessWOK4G) {
+  if (seg->cache.valid & SegAccessWOK) {
+    if (offset < seg->cache.u.segment.limit_scaled) {
 accessOK:
-    laddr = (Bit32u)(seg->cache.u.segment.base) + offset;
-    bx_bool user = (curr_pl == 3);
+      laddr = (Bit32u)(seg->cache.u.segment.base) + offset;
+      bx_bool user = (curr_pl == 3);
 #if BX_SupportGuest2HostTLB
-    unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 1);
+      unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 1);
 #if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
-    Bit32u lpf = AlignedAccessLPFOf(laddr, (1 & BX_CPU_THIS_PTR alignment_check_mask));
+      Bit32u lpf = AlignedAccessLPFOf(laddr, (1 & BX_CPU_THIS_PTR alignment_check_mask));
 #else
-    Bit32u lpf = LPFOf(laddr);
+      Bit32u lpf = LPFOf(laddr);
 #endif
-    bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
-    if (tlbEntry->lpf == lpf) {
-      // See if the TLB entry privilege level allows us write access
-      // from this CPL.
-      if (! (tlbEntry->accessBits & (0x2 | user))) {
-        bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
-        Bit32u pageOffset = PAGE_OFFSET(laddr);
-        BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 2, BX_WRITE);
-        BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
+      bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
+      if (tlbEntry->lpf == lpf) {
+        // See if the TLB entry privilege level allows us write access
+        // from this CPL.
+        if (! (tlbEntry->accessBits & (0x2 | user))) {
+          bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
+          Bit32u pageOffset = PAGE_OFFSET(laddr);
+          BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 2, BX_WRITE);
+          BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
             tlbEntry->ppf | pageOffset, 2, curr_pl, BX_WRITE, (Bit8u*) &data);
-        Bit16u *hostAddr = (Bit16u*) (hostPageAddr | pageOffset);
+          Bit16u *hostAddr = (Bit16u*) (hostPageAddr | pageOffset);
 #if BX_SUPPORT_ICACHE
-        pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
+          pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
 #endif
-        WriteHostWordToLittleEndian(hostAddr, data);
-        return;
+          WriteHostWordToLittleEndian(hostAddr, data);
+          return;
+        }
      }
-    }
 #endif
-    // missed 4G limit check
-    if (offset == 0xffffffff) {
-      BX_ERROR(("write_new_stack_word_32(): 4G segment limit violation"));
-      exception(BX_SS_EXCEPTION, seg->selector.value & 0xfffc, 0);
-    }
-
 #if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
-    if (BX_CPU_THIS_PTR alignment_check() && user) {
-      if (laddr & 1) {
-        BX_ERROR(("write_new_stack_word_32(): #AC misaligned access"));
-        exception(BX_AC_EXCEPTION, 0, 0);
+      if (BX_CPU_THIS_PTR alignment_check() && user) {
+        if (laddr & 1) {
+          BX_ERROR(("write_new_stack_word_32(): #AC misaligned access"));
+          exception(BX_AC_EXCEPTION, 0, 0);
+        }
      }
-    }
 #endif
-    access_write_linear(laddr, 2, curr_pl, (void *) &data);
-    return;
-  }
-
-  if (seg->cache.valid & SegAccessWOK) {
-    if (offset < seg->cache.u.segment.limit_scaled)
-      goto accessOK;
+      access_write_linear(laddr, 2, curr_pl, (void *) &data);
+      return;
+    }
+    else {
+      BX_ERROR(("write_new_stack_word_32(): segment limit violation"));
+      exception(BX_SS_EXCEPTION,
+          seg->selector.rpl != CPL ? (seg->selector.value & 0xfffc) : 0, 0);
+    }
  }
 
  // add error code when segment violation occurs when pushing into new stack
@@ -1265,59 +1211,55 @@ void BX_CPU_C::write_new_stack_dword_32(bx_segment_reg_t *seg, Bit32u offset, un
 {
   Bit32u laddr;
 
-  if (seg->cache.valid & SegAccessWOK4G) {
+  if (seg->cache.valid & SegAccessWOK) {
+    if (offset < (seg->cache.u.segment.limit_scaled-2)) {
 accessOK:
-    laddr = (Bit32u)(seg->cache.u.segment.base) + offset;
-    bx_bool user = (curr_pl == 3);
+      laddr = (Bit32u)(seg->cache.u.segment.base) + offset;
+      bx_bool user = (curr_pl == 3);
 #if BX_SupportGuest2HostTLB
-    unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 3);
+      unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 3);
 #if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
-    Bit32u lpf = AlignedAccessLPFOf(laddr, (3 & BX_CPU_THIS_PTR alignment_check_mask));
+      Bit32u lpf = AlignedAccessLPFOf(laddr, (3 & BX_CPU_THIS_PTR alignment_check_mask));
 #else
-    Bit32u lpf = LPFOf(laddr);
+      Bit32u lpf = LPFOf(laddr);
 #endif
-    bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
-    if (tlbEntry->lpf == lpf) {
-      // See if the TLB entry privilege level allows us write access
-      // from this CPL.
-      if (! (tlbEntry->accessBits & (0x2 | user))) {
-        bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
-        Bit32u pageOffset = PAGE_OFFSET(laddr);
-        BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 4, BX_WRITE);
-        BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
+      bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
+      if (tlbEntry->lpf == lpf) {
+        // See if the TLB entry privilege level allows us write access
+        // from this CPL.
+        if (! (tlbEntry->accessBits & (0x2 | user))) {
+          bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
+          Bit32u pageOffset = PAGE_OFFSET(laddr);
+          BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 4, BX_WRITE);
+          BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
             tlbEntry->ppf | pageOffset, 4, curr_pl, BX_WRITE, (Bit8u*) &data);
-        Bit32u *hostAddr = (Bit32u*) (hostPageAddr | pageOffset);
+          Bit32u *hostAddr = (Bit32u*) (hostPageAddr | pageOffset);
 #if BX_SUPPORT_ICACHE
-        pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
+          pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
 #endif
-        WriteHostDWordToLittleEndian(hostAddr, data);
-        return;
+          WriteHostDWordToLittleEndian(hostAddr, data);
+          return;
+        }
      }
-    }
 #endif
-    // missed 4G limit check
-    if (offset >= 0xfffffffd) {
-      BX_ERROR(("write_new_stack_dword_32(): 4G segment limit violation"));
-      exception(BX_SS_EXCEPTION, seg->selector.value & 0xfffc, 0);
-    }
-
 #if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
-    if (BX_CPU_THIS_PTR alignment_check() && user) {
-      if (laddr & 3) {
-        BX_ERROR(("write_new_stack_dword_32(): #AC misaligned access"));
-        exception(BX_AC_EXCEPTION, 0, 0);
+      if (BX_CPU_THIS_PTR alignment_check() && user) {
+        if (laddr & 3) {
+          BX_ERROR(("write_new_stack_dword_32(): #AC misaligned access"));
+          exception(BX_AC_EXCEPTION, 0, 0);
+        }
      }
-    }
 #endif
-    access_write_linear(laddr, 4, curr_pl, (void *) &data);
-    return;
-  }
-
-  if (seg->cache.valid & SegAccessWOK) {
-    if (offset < (seg->cache.u.segment.limit_scaled-2))
-      goto accessOK;
+      access_write_linear(laddr, 4, curr_pl, (void *) &data);
+      return;
+    }
+    else {
+      BX_ERROR(("write_new_stack_dword_32(): segment limit violation"));
+      exception(BX_SS_EXCEPTION,
+          seg->selector.rpl != CPL ? (seg->selector.value & 0xfffc) : 0, 0);
    }
  }
 
  // add error code when segment violation occurs when pushing into new stack
@@ -1265,59 +1211,55 @@ void BX_CPU_C::write_new_stack_dword_32(bx_segment_reg_t *seg, Bit32u offset, un
 {
   Bit32u laddr;

-  if (seg->cache.valid & SegAccessWOK4G) {
+  if (seg->cache.valid & SegAccessWOK) {
+    if (offset < (seg->cache.u.segment.limit_scaled-2)) {
 accessOK:
-    laddr = (Bit32u)(seg->cache.u.segment.base) + offset;
-    bx_bool user = (curr_pl == 3);
+      laddr = (Bit32u)(seg->cache.u.segment.base) + offset;
+      bx_bool user = (curr_pl == 3);
 #if BX_SupportGuest2HostTLB
-    unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 3);
+      unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 3);
 #if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
-    Bit32u lpf = AlignedAccessLPFOf(laddr, (3 & BX_CPU_THIS_PTR alignment_check_mask));
+      Bit32u lpf = AlignedAccessLPFOf(laddr, (3 & BX_CPU_THIS_PTR alignment_check_mask));
 #else
-    Bit32u lpf = LPFOf(laddr);
+      Bit32u lpf = LPFOf(laddr);
 #endif
-    bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
-    if (tlbEntry->lpf == lpf) {
-      // See if the TLB entry privilege level allows us write access
-      // from this CPL.
-      if (! (tlbEntry->accessBits & (0x2 | user))) {
-        bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
-        Bit32u pageOffset = PAGE_OFFSET(laddr);
-        BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 4, BX_WRITE);
-        BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
+      bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
+      if (tlbEntry->lpf == lpf) {
+        // See if the TLB entry privilege level allows us write access
+        // from this CPL.
+        if (! (tlbEntry->accessBits & (0x2 | user))) {
+          bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
+          Bit32u pageOffset = PAGE_OFFSET(laddr);
+          BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 4, BX_WRITE);
+          BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
                         tlbEntry->ppf | pageOffset, 4, curr_pl, BX_WRITE, (Bit8u*) &data);
-        Bit32u *hostAddr = (Bit32u*) (hostPageAddr | pageOffset);
+          Bit32u *hostAddr = (Bit32u*) (hostPageAddr | pageOffset);
 #if BX_SUPPORT_ICACHE
-      pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
+          pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
 #endif
-        WriteHostDWordToLittleEndian(hostAddr, data);
-        return;
+          WriteHostDWordToLittleEndian(hostAddr, data);
+          return;
+        }
       }
-    }
 #endif
-  // missed 4G limit check
-  if (offset >= 0xfffffffd) {
-    BX_ERROR(("write_new_stack_dword_32(): 4G segment limit violation"));
-    exception(BX_SS_EXCEPTION, seg->selector.value & 0xfffc, 0);
-  }
-
 #if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
-    if (BX_CPU_THIS_PTR alignment_check() && user) {
-      if (laddr & 3) {
-        BX_ERROR(("write_new_stack_dword_32(): #AC misaligned access"));
-        exception(BX_AC_EXCEPTION, 0, 0);
+      if (BX_CPU_THIS_PTR alignment_check() && user) {
+        if (laddr & 3) {
+          BX_ERROR(("write_new_stack_dword_32(): #AC misaligned access"));
+          exception(BX_AC_EXCEPTION, 0, 0);
+        }
       }
-    }
 #endif
-    access_write_linear(laddr, 4, curr_pl, (void *) &data);
-    return;
-  }
-
-  if (seg->cache.valid & SegAccessWOK) {
-    if (offset < (seg->cache.u.segment.limit_scaled-2))
-      goto accessOK;
+      access_write_linear(laddr, 4, curr_pl, (void *) &data);
+      return;
+    }
+    else {
+      BX_ERROR(("write_new_stack_dword_32(): segment limit violation"));
+      exception(BX_SS_EXCEPTION,
+          seg->selector.rpl != CPL ? (seg->selector.value & 0xfffc) : 0, 0);
+    }
   }

 // add error code when segment violation occurs when pushing into new stack
@@ -1332,60 +1274,55 @@ void BX_CPU_C::write_new_stack_qword_32(bx_segment_reg_t *seg, Bit32u offset, un
 {
   Bit32u laddr;

-  if (seg->cache.valid & SegAccessWOK4G) {
+  if (seg->cache.valid & SegAccessWOK) {
+    if (offset <= (seg->cache.u.segment.limit_scaled-7)) {
 accessOK:
-    laddr = (Bit32u)(seg->cache.u.segment.base) + offset;
-    bx_bool user = (curr_pl == 3);
+      laddr = (Bit32u)(seg->cache.u.segment.base) + offset;
+      bx_bool user = (curr_pl == 3);
 #if BX_SupportGuest2HostTLB
-    unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 7);
+      unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 7);
 #if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
-    Bit32u lpf = AlignedAccessLPFOf(laddr, (7 & BX_CPU_THIS_PTR alignment_check_mask));
+      Bit32u lpf = AlignedAccessLPFOf(laddr, (7 & BX_CPU_THIS_PTR alignment_check_mask));
 #else
-    Bit32u lpf = LPFOf(laddr);
+      Bit32u lpf = LPFOf(laddr);
 #endif
-    bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
-    if (tlbEntry->lpf == lpf) {
-      // See if the TLB entry privilege level allows us write access
-      // from this CPL.
-      if (! (tlbEntry->accessBits & (0x2 | user))) {
-        bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
-        Bit32u pageOffset = PAGE_OFFSET(laddr);
-        BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 8, BX_WRITE);
-        BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
-            tlbEntry->ppf | pageOffset, 8, curr_pl, BX_WRITE, (Bit8u*) &data);
-        Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
+      bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
+      if (tlbEntry->lpf == lpf) {
+        // See if the TLB entry privilege level allows us write access
+        // from this CPL.
+        if (! (tlbEntry->accessBits & (0x2 | user))) {
+          bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
+          Bit32u pageOffset = PAGE_OFFSET(laddr);
+          BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 8, BX_WRITE);
+          BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
+              tlbEntry->ppf | pageOffset, 8, curr_pl, BX_WRITE, (Bit8u*) &data);
+          Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
 #if BX_SUPPORT_ICACHE
-      pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
+          pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
 #endif
-        WriteHostQWordToLittleEndian(hostAddr, data);
-        return;
+          WriteHostQWordToLittleEndian(hostAddr, data);
+          return;
+        }
       }
-    }
 #endif
-  // missed 4G limit check
-  if (offset >= 0xfffffff8) {
-    BX_ERROR(("write_new_stack_qword_32(): 4G segment limit violation"));
+#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
+      if (BX_CPU_THIS_PTR alignment_check() && user) {
+        if (laddr & 7) {
+          BX_ERROR(("write_new_stack_qword_32(): #AC misaligned access"));
+          exception(BX_AC_EXCEPTION, 0, 0);
+        }
+      }
+#endif
+
+      access_write_linear(laddr, 8, curr_pl, (void *) &data);
+      return;
+    }
+    else {
+      BX_ERROR(("write_new_stack_qword_32(): segment limit violation"));
       exception(BX_SS_EXCEPTION,
           seg->selector.rpl != CPL ? (seg->selector.value & 0xfffc) : 0, 0);
     }
-
-#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
-  if (BX_CPU_THIS_PTR alignment_check() && user) {
-    if (laddr & 7) {
-      BX_ERROR(("write_new_stack_qword_32(): #AC misaligned access"));
-      exception(BX_AC_EXCEPTION, 0, 0);
-    }
-  }
-#endif
-
-  access_write_linear(laddr, 8, curr_pl, (void *) &data);
-  return;
-  }
-
-  if (seg->cache.valid & SegAccessWOK) {
-    if (offset <= (seg->cache.u.segment.limit_scaled-7))
-      goto accessOK;
   }

 // add error code when segment violation occurs when pushing into new stack
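
The three write_new_stack_*_32 hunks above converge on one rule for the #SS error code, which the trailing comment refers to. A condensation of that rule (the helper name is hypothetical, not Bochs code):

    #include <cstdint>

    // When the push targets a new stack loaded for an inter-privilege
    // transition (selector RPL differs from CPL), the #SS error code
    // carries the selector's index bits (RPL/TI cleared via 0xfffc);
    // for a same-privilege fault the error code is zero.
    static uint16_t new_stack_ss_error_code(uint16_t selector, unsigned rpl, unsigned cpl)
    {
      return (rpl != cpl) ? (uint16_t)(selector & 0xfffc) : 0;
    }
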
diff --git a/bochs/cpu/descriptor.h b/bochs/cpu/descriptor.h
index dc3494c82..2289a4b87 100755
--- a/bochs/cpu/descriptor.h
+++ b/bochs/cpu/descriptor.h
@@ -1,5 +1,5 @@
 /////////////////////////////////////////////////////////////////////////
-// $Id: descriptor.h,v 1.24 2008-05-26 18:02:07 sshwarts Exp $
+// $Id: descriptor.h,v 1.25 2008-09-08 20:47:33 sshwarts Exp $
 /////////////////////////////////////////////////////////////////////////
 //
 // Copyright (c) 2007 Stanislav Shwartsman
@@ -58,8 +58,6 @@ typedef struct
 #define SegValidCache (0x01)
 #define SegAccessROK  (0x02)
 #define SegAccessWOK  (0x04)
-#define SegAccessROK4G (0x08)
-#define SegAccessWOK4G (0x10)

   unsigned valid;        // Holds above values, Or'd together. Used to
                          // hold only 0 or 1.
diff --git a/bochs/cpu/io.cc b/bochs/cpu/io.cc
index 3bddac679..c8bcac362 100644
--- a/bochs/cpu/io.cc
+++ b/bochs/cpu/io.cc
@@ -1,5 +1,5 @@
 /////////////////////////////////////////////////////////////////////////
-// $Id: io.cc,v 1.65 2008-08-03 19:53:08 sshwarts Exp $
+// $Id: io.cc,v 1.66 2008-09-08 20:47:33 sshwarts Exp $
 /////////////////////////////////////////////////////////////////////////
 //
 // Copyright (C) 2001 MandrakeSoft S.A.
@@ -53,8 +53,12 @@ Bit32u BX_CPU_C::FastRepINSW(bxInstruction_c *i, bx_address dstOff, Bit16u port,
   Bit8u *hostAddrDst;
   unsigned count;

+  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);
+
   bx_segment_reg_t *dstSegPtr = &BX_CPU_THIS_PTR sregs[BX_SEG_REG_ES];
-  if (!(dstSegPtr->cache.valid & SegAccessWOK4G))
+  if (!(dstSegPtr->cache.valid & SegAccessWOK))
+    return 0;
+  if ((dstOff | 0xfff) > dstSegPtr->cache.u.segment.limit_scaled)
     return 0;

   bx_address laddrDst = BX_CPU_THIS_PTR get_laddr(BX_SEG_REG_ES, dstOff);
@@ -137,8 +141,12 @@ Bit32u BX_CPU_C::FastRepOUTSW(bxInstruction_c *i, unsigned srcSeg, bx_address sr
   Bit8u *hostAddrSrc;
   unsigned count;

+  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);
+
   bx_segment_reg_t *srcSegPtr = &BX_CPU_THIS_PTR sregs[srcSeg];
-  if (!(srcSegPtr->cache.valid & SegAccessROK4G))
+  if (!(srcSegPtr->cache.valid & SegAccessROK))
+    return 0;
+  if ((srcOff | 0xfff) > srcSegPtr->cache.u.segment.limit_scaled)
     return 0;

   bx_address laddrSrc = BX_CPU_THIS_PTR get_laddr(srcSeg, srcOff);
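
The io.cc hunks above trade the removed 4G flags for a page-granular gate on the fast REP path. A sketch of that test (illustrative function, assuming 4K pages):

    #include <cstdint>

    // (off | 0xfff) is the address of the last byte of the 4K page that
    // contains 'off'; if even that byte sits under limit_scaled, the whole
    // page does, so the fast path can run without per-element limit checks.
    static bool page_inside_limit(uint32_t off, uint32_t limit_scaled)
    {
      return (off | 0xfff) <= limit_scaled;
    }
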
diff --git a/bochs/cpu/proc_ctrl.cc b/bochs/cpu/proc_ctrl.cc
index 89dfbb168..fa38f4561 100644
--- a/bochs/cpu/proc_ctrl.cc
+++ b/bochs/cpu/proc_ctrl.cc
@@ -1,5 +1,5 @@
 /////////////////////////////////////////////////////////////////////////
-// $Id: proc_ctrl.cc,v 1.258 2008-09-08 15:45:56 sshwarts Exp $
+// $Id: proc_ctrl.cc,v 1.259 2008-09-08 20:47:33 sshwarts Exp $
 /////////////////////////////////////////////////////////////////////////
 //
 // Copyright (C) 2001 MandrakeSoft S.A.
@@ -189,20 +189,30 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::CLFLUSH(bxInstruction_c *i)
   bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[i->seg()];
   bx_address eaddr = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));
-
-  // check if we could access the memory segment
-  if (!(seg->cache.valid & SegAccessROK4G)) {
-    if (! execute_virtual_checks(seg, eaddr, 1))
-      exception(int_number(i->seg()), 0, 0);
-  }
-
   bx_address laddr = BX_CPU_THIS_PTR get_laddr(i->seg(), eaddr);
+
 #if BX_SUPPORT_X86_64
-  if (! IsCanonical(laddr)) {
-    BX_ERROR(("CLFLUSH: non-canonical access !"));
-    exception(int_number(i->seg()), 0, 0);
+  if (BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64) {
+    if (! IsCanonical(laddr)) {
+      BX_ERROR(("CLFLUSH: non-canonical access !"));
+      exception(int_number(i->seg()), 0, 0);
+    }
   }
+  else
 #endif
+  {
+    // check if we could access the memory segment
+    if (!(seg->cache.valid & SegAccessROK)) {
+      if (! execute_virtual_checks(seg, eaddr, 1))
+        exception(int_number(i->seg()), 0, 0);
+    }
+    else {
+      if (eaddr > seg->cache.u.segment.limit_scaled) {
+        BX_ERROR(("CLFLUSH: segment limit violation"));
+        exception(int_number(i->seg()), 0, 0);
+      }
+    }
+  }

   bx_phy_address paddr;
@@ -1917,8 +1927,9 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::MONITOR(bxInstruction_c *i)
     exception(BX_GP_EXCEPTION, 0, 0);
   }

-  bx_address offset, laddr;
-  bx_phy_address paddr;
+  bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[i->seg()];
+
+  bx_address offset;

 #if BX_SUPPORT_X86_64
   if (i->as64L()) {
@@ -1933,14 +1944,33 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::MONITOR(bxInstruction_c *i)
     offset = AX;
   }

-  // check if we could access the memory segment
-  if (!(seg->cache.valid & SegAccessROK4G)) {
-    if (! read_virtual_checks(&BX_CPU_THIS_PTR sregs[i->seg()], offset, 1))
+  // set MONITOR
+  bx_address laddr = BX_CPU_THIS_PTR get_laddr(i->seg(), offset);
+
+#if BX_SUPPORT_X86_64
+  if (BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64) {
+    if (! IsCanonical(laddr)) {
+      BX_ERROR(("MONITOR: non-canonical access !"));
       exception(int_number(i->seg()), 0, 0);
+    }
+  }
+  else
+#endif
+  {
+    // check if we could access the memory segment
+    if (!(seg->cache.valid & SegAccessROK)) {
+      if (! read_virtual_checks(seg, offset, 1))
+        exception(int_number(i->seg()), 0, 0);
+    }
+    else {
+      if (offset > seg->cache.u.segment.limit_scaled) {
+        BX_ERROR(("MONITOR: segment limit violation"));
+        exception(int_number(i->seg()), 0, 0);
+      }
+    }
   }

-  // set MONITOR
-  laddr = BX_CPU_THIS_PTR get_laddr(i->seg(), offset);
+  bx_phy_address paddr;

   if (BX_CPU_THIS_PTR cr0.get_PG()) {
     paddr = dtranslate_linear(laddr, CPL, BX_READ);
@@ -2048,7 +2078,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::SYSENTER(bxInstruction_c *i)
   parse_selector(BX_CPU_THIS_PTR msr.sysenter_cs_msr & BX_SELECTOR_RPL_MASK,
                  &BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector);

-  BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.valid = SegValidCache | SegAccessROK4G | SegAccessWOK4G;
+  BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.valid = SegValidCache | SegAccessROK | SegAccessWOK;
   BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.p = 1;
   BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.dpl = 0;
   BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.segment = 1; /* data/code segment */
@@ -2076,7 +2106,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::SYSENTER(bxInstruction_c *i)
   parse_selector((BX_CPU_THIS_PTR msr.sysenter_cs_msr + 8) & BX_SELECTOR_RPL_MASK,
                  &BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].selector);

-  BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.valid = SegValidCache | SegAccessROK4G | SegAccessWOK4G;
+  BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.valid = SegValidCache | SegAccessROK | SegAccessWOK;
   BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.p = 1;
   BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.dpl = 0;
   BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.segment = 1; /* data/code segment */
@@ -2143,7 +2173,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::SYSEXIT(bxInstruction_c *i)
   parse_selector(((BX_CPU_THIS_PTR msr.sysenter_cs_msr + 32) & BX_SELECTOR_RPL_MASK) | 3,
                  &BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector);

-  BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.valid = SegValidCache | SegAccessROK4G | SegAccessWOK4G;
+  BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.valid = SegValidCache | SegAccessROK | SegAccessWOK;
   BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.p = 1;
   BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.dpl = 3;
   BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.segment = 1; /* data/code segment */
@@ -2165,7 +2195,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::SYSEXIT(bxInstruction_c *i)
   parse_selector(((BX_CPU_THIS_PTR msr.sysenter_cs_msr + 16) & BX_SELECTOR_RPL_MASK) | 3,
                  &BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector);

-  BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.valid = SegValidCache | SegAccessROK4G | SegAccessWOK4G;
+  BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.valid = SegValidCache | SegAccessROK | SegAccessWOK;
   BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.p = 1;
   BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.dpl = 3;
   BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.segment = 1; /* data/code segment */
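
The SYSENTER/SYSEXIT hunks around this point all derive their flat selectors from SYSENTER_CS_MSR at fixed strides. A hypothetical summary of that layout, as the hunks suggest (helpers are illustrative; 0xfffc stands in for BX_SELECTOR_RPL_MASK):

    #include <cstdint>

    // CS/SS come from SYSENTER_CS_MSR plus a fixed offset, with the RPL
    // bits masked off; the user-mode SYSEXIT targets then force RPL = 3.
    static uint16_t sysenter_cs(uint16_t msr)           { return msr & 0xfffc; }
    static uint16_t sysenter_ss(uint16_t msr)           { return (msr + 8) & 0xfffc; }
    static uint16_t sysexit_cs(uint16_t msr, bool os64) { return ((msr + (os64 ? 32 : 16)) & 0xfffc) | 3; }
    static uint16_t sysexit_ss(uint16_t msr, bool os64) { return ((msr + (os64 ? 40 : 24)) & 0xfffc) | 3; }
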
@@ -2197,7 +2227,7 @@
   parse_selector(((BX_CPU_THIS_PTR msr.sysenter_cs_msr + (i->os64L() ? 40:24)) & BX_SELECTOR_RPL_MASK) | 3,
                  &BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].selector);

-  BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.valid = SegValidCache | SegAccessROK4G | SegAccessWOK4G;
+  BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.valid = SegValidCache | SegAccessROK | SegAccessWOK;
   BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.p = 1;
   BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.dpl = 3;
   BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.segment = 1; /* data/code segment */
@@ -2249,7 +2279,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::SYSCALL(bxInstruction_c *i)
   parse_selector((MSR_STAR >> 32) & BX_SELECTOR_RPL_MASK,
                  &BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector);

-  BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.valid = SegValidCache | SegAccessROK4G | SegAccessWOK4G;
+  BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.valid = SegValidCache | SegAccessROK | SegAccessWOK;
   BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.p = 1;
   BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.dpl = 0;
   BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.segment = 1; /* data/code segment */
@@ -2273,7 +2303,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::SYSCALL(bxInstruction_c *i)
   parse_selector(((MSR_STAR >> 32) + 8) & BX_SELECTOR_RPL_MASK,
                  &BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].selector);

-  BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.valid = SegValidCache | SegAccessROK4G | SegAccessWOK4G;
+  BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.valid = SegValidCache | SegAccessROK | SegAccessWOK;
   BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.p = 1;
   BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.dpl = 0;
   BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.segment = 1; /* data/code segment */
@@ -2300,7 +2330,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::SYSCALL(bxInstruction_c *i)
   parse_selector((MSR_STAR >> 32) & BX_SELECTOR_RPL_MASK,
                  &BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector);

-  BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.valid = SegValidCache | SegAccessROK4G | SegAccessWOK4G;
+  BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.valid = SegValidCache | SegAccessROK | SegAccessWOK;
   BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.p = 1;
   BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.dpl = 0;
   BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.segment = 1; /* data/code segment */
@@ -2323,7 +2353,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::SYSCALL(bxInstruction_c *i)
   parse_selector(((MSR_STAR >> 32) + 8) & BX_SELECTOR_RPL_MASK,
                  &BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].selector);

-  BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.valid = SegValidCache | SegAccessROK4G | SegAccessWOK4G;
+  BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.valid = SegValidCache | SegAccessROK | SegAccessWOK;
   BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.p = 1;
   BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.dpl = 0;
   BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.segment = 1; /* data/code segment */
@@ -2377,7 +2407,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::SYSRET(bxInstruction_c *i)
   parse_selector((((MSR_STAR >> 48) + 16) & BX_SELECTOR_RPL_MASK) | 3,
                  &BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector);

-  BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.valid = SegValidCache | SegAccessROK4G | SegAccessWOK4G;
+  BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.valid = SegValidCache | SegAccessROK | SegAccessWOK;
   BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.p = 1;
   BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.dpl = 3;
   BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.segment = 1; /* data/code segment */
@@ -2397,7 +2427,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::SYSRET(bxInstruction_c *i)
   parse_selector((MSR_STAR >> 48) | 3,
                  &BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector);

-  BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.valid = SegValidCache | SegAccessROK4G | SegAccessWOK4G;
+  BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.valid = SegValidCache | SegAccessROK | SegAccessWOK;
   BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.p = 1;
   BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.dpl = 3;
   BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.segment = 1; /* data/code segment */
@@ -2424,7 +2454,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::SYSRET(bxInstruction_c *i)
   parse_selector((Bit16u)((MSR_STAR >> 48) + 8),
                  &BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].selector);

-  BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.valid = SegValidCache | SegAccessROK4G | SegAccessWOK4G;
+  BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.valid = SegValidCache | SegAccessROK | SegAccessWOK;
   BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.p = 1;
   BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.dpl = 3;
   BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.segment = 1; /* data/code segment */
@@ -2437,7 +2467,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::SYSRET(bxInstruction_c *i)
   parse_selector((MSR_STAR >> 48) | 3,
                  &BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector);

-  BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.valid = SegValidCache | SegAccessROK4G | SegAccessWOK4G;
+  BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.valid = SegValidCache | SegAccessROK | SegAccessWOK;
   BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.p = 1;
   BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.dpl = 3;
   BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.segment = 1; /* data/code segment */
@@ -2460,7 +2490,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::SYSRET(bxInstruction_c *i)
   parse_selector((Bit16u)((MSR_STAR >> 48) + 8),
                  &BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].selector);

-  BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.valid = SegValidCache | SegAccessROK4G | SegAccessWOK4G;
+  BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.valid = SegValidCache | SegAccessROK | SegAccessWOK;
   BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.p = 1;
   BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.dpl = 3;
   BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.segment = 1; /* data/code segment */
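
In long mode the CLFLUSH and MONITOR hunks earlier in this file's diff check canonicality instead of segment limits. A sketch modeled on Bochs' IsCanonical macro for 48-bit linear addresses (the helper name is illustrative):

    #include <cstdint>

    // A 48-bit canonical address has bits 63..48 equal to bit 47. After an
    // arithmetic shift right by 47, only 0 (upper bits all clear) or -1
    // (upper bits all set) may remain, which '+ 1 < 2' accepts in one compare.
    static bool is_canonical_48(uint64_t laddr)
    {
      return (uint64_t)(((int64_t)laddr >> 47) + 1) < 2;
    }
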
diff --git a/bochs/cpu/smm.cc b/bochs/cpu/smm.cc
index 52302a763..43c6be89b 100755
--- a/bochs/cpu/smm.cc
+++ b/bochs/cpu/smm.cc
@@ -1,5 +1,5 @@
 /////////////////////////////////////////////////////////////////////////
-// $Id: smm.cc,v 1.44 2008-09-08 15:45:57 sshwarts Exp $
+// $Id: smm.cc,v 1.45 2008-09-08 20:47:33 sshwarts Exp $
 /////////////////////////////////////////////////////////////////////////
 //
 // Copyright (c) 2006 Stanislav Shwartsman
@@ -148,7 +148,7 @@ void BX_CPU_C::enter_system_management_mode(void)
   parse_selector(BX_CPU_THIS_PTR smbase >> 4,
                  &BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].selector);

-  BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.valid = SegValidCache | SegAccessROK4G | SegAccessWOK4G;
+  BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.valid = SegValidCache | SegAccessROK | SegAccessWOK;
   BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.p = 1;
   BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.dpl = 0;
   BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.segment = 1; /* data/code segment */
@@ -175,7 +175,7 @@ void BX_CPU_C::enter_system_management_mode(void)
   parse_selector(0x0000,
                  &BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].selector);

-  BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].cache.valid = SegValidCache | SegAccessROK4G | SegAccessWOK4G;
+  BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].cache.valid = SegValidCache | SegAccessROK | SegAccessWOK;
   BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].cache.p = 1;
   BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].cache.dpl = 0;
   BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].cache.segment = 1; /* data/code segment */
diff --git a/bochs/cpu/string.cc b/bochs/cpu/string.cc
index 653a8d075..c7f458e62 100644
--- a/bochs/cpu/string.cc
+++ b/bochs/cpu/string.cc
@@ -1,5 +1,5 @@
 /////////////////////////////////////////////////////////////////////////
-// $Id: string.cc,v 1.63 2008-08-03 19:53:09 sshwarts Exp $
+// $Id: string.cc,v 1.64 2008-09-08 20:47:33 sshwarts Exp $
 /////////////////////////////////////////////////////////////////////////
 //
 // Copyright (C) 2001 MandrakeSoft S.A.
@@ -49,12 +49,18 @@ Bit32u BX_CPU_C::FastRepMOVSB(bxInstruction_c *i, unsigned srcSeg, bx_address sr
   bx_address laddrDst, laddrSrc;
   Bit8u *hostAddrSrc, *hostAddrDst;

+  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);
+
   bx_segment_reg_t *srcSegPtr = &BX_CPU_THIS_PTR sregs[srcSeg];
-  if (!(srcSegPtr->cache.valid & SegAccessROK4G))
+  if (!(srcSegPtr->cache.valid & SegAccessROK))
+    return 0;
+  if ((srcOff | 0xfff) > srcSegPtr->cache.u.segment.limit_scaled)
     return 0;

   bx_segment_reg_t *dstSegPtr = &BX_CPU_THIS_PTR sregs[dstSeg];
-  if (!(dstSegPtr->cache.valid & SegAccessWOK4G))
+  if (!(dstSegPtr->cache.valid & SegAccessWOK))
+    return 0;
+  if ((dstOff | 0xfff) > dstSegPtr->cache.u.segment.limit_scaled)
     return 0;

   laddrSrc = BX_CPU_THIS_PTR get_laddr(srcSeg, srcOff);
@@ -144,12 +150,18 @@ Bit32u BX_CPU_C::FastRepMOVSW(bxInstruction_c *i, unsigned srcSeg, bx_address sr
   bx_address laddrDst, laddrSrc;
   Bit8u *hostAddrSrc, *hostAddrDst;

+  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);
+
   bx_segment_reg_t *srcSegPtr = &BX_CPU_THIS_PTR sregs[srcSeg];
-  if (!(srcSegPtr->cache.valid & SegAccessROK4G))
+  if (!(srcSegPtr->cache.valid & SegAccessROK))
+    return 0;
+  if ((srcOff | 0xfff) > srcSegPtr->cache.u.segment.limit_scaled)
     return 0;

   bx_segment_reg_t *dstSegPtr = &BX_CPU_THIS_PTR sregs[dstSeg];
-  if (!(dstSegPtr->cache.valid & SegAccessWOK4G))
+  if (!(dstSegPtr->cache.valid & SegAccessWOK))
+    return 0;
+  if ((dstOff | 0xfff) > dstSegPtr->cache.u.segment.limit_scaled)
     return 0;

   laddrSrc = BX_CPU_THIS_PTR get_laddr(srcSeg, srcOff);
@@ -242,12 +254,18 @@ Bit32u BX_CPU_C::FastRepMOVSD(bxInstruction_c *i, unsigned srcSeg, bx_address sr
   bx_address laddrDst, laddrSrc;
   Bit8u *hostAddrSrc, *hostAddrDst;

+  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);
+
   bx_segment_reg_t *srcSegPtr = &BX_CPU_THIS_PTR sregs[srcSeg];
-  if (!(srcSegPtr->cache.valid & SegAccessROK4G))
+  if (!(srcSegPtr->cache.valid & SegAccessROK))
+    return 0;
+  if ((srcOff | 0xfff) > srcSegPtr->cache.u.segment.limit_scaled)
     return 0;

   bx_segment_reg_t *dstSegPtr = &BX_CPU_THIS_PTR sregs[dstSeg];
-  if (!(dstSegPtr->cache.valid & SegAccessWOK4G))
+  if (!(dstSegPtr->cache.valid & SegAccessWOK))
+    return 0;
+  if ((dstOff | 0xfff) > dstSegPtr->cache.u.segment.limit_scaled)
     return 0;

   laddrSrc = BX_CPU_THIS_PTR get_laddr(srcSeg, srcOff);
@@ -340,8 +358,12 @@ Bit32u BX_CPU_C::FastRepSTOSB(bxInstruction_c *i, unsigned dstSeg, bx_address ds
   bx_address laddrDst;
   Bit8u *hostAddrDst;

+  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);
+
   bx_segment_reg_t *dstSegPtr = &BX_CPU_THIS_PTR sregs[dstSeg];
-  if ((dstSegPtr->cache.valid & SegAccessWOK4G) != SegAccessWOK4G)
+  if (!(dstSegPtr->cache.valid & SegAccessWOK))
+    return 0;
+  if ((dstOff | 0xfff) > dstSegPtr->cache.u.segment.limit_scaled)
     return 0;

   laddrDst = BX_CPU_THIS_PTR get_laddr(dstSeg, dstOff);
@@ -405,8 +427,12 @@ Bit32u BX_CPU_C::FastRepSTOSW(bxInstruction_c *i, unsigned dstSeg, bx_address ds
   bx_address laddrDst;
   Bit8u *hostAddrDst;

+  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);
+
   bx_segment_reg_t *dstSegPtr = &BX_CPU_THIS_PTR sregs[dstSeg];
-  if ((dstSegPtr->cache.valid & SegAccessWOK4G) != SegAccessWOK4G)
+  if (!(dstSegPtr->cache.valid & SegAccessWOK))
+    return 0;
+  if ((dstOff | 0xfff) > dstSegPtr->cache.u.segment.limit_scaled)
     return 0;

   laddrDst = BX_CPU_THIS_PTR get_laddr(dstSeg, dstOff);
@@ -472,8 +498,12 @@ Bit32u BX_CPU_C::FastRepSTOSD(bxInstruction_c *i, unsigned dstSeg, bx_address ds
   bx_address laddrDst;
   Bit8u *hostAddrDst;

+  BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);
+
   bx_segment_reg_t *dstSegPtr = &BX_CPU_THIS_PTR sregs[dstSeg];
-  if (!(dstSegPtr->cache.valid & SegAccessWOK4G))
+  if (!(dstSegPtr->cache.valid & SegAccessWOK))
+    return 0;
+  if ((dstOff | 0xfff) > dstSegPtr->cache.u.segment.limit_scaled)
     return 0;

   laddrDst = BX_CPU_THIS_PTR get_laddr(dstSeg, dstOff);
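
Taken together, the string.cc hunks give each FastRep* routine the same two-step gate before it touches host memory, and the new BX_ASSERT documents that these paths are only reached outside long mode, where segment limits apply. A condensed, hypothetical sketch of that gate (types simplified, not the Bochs structures):

    #include <cstdint>

    struct SegCacheView { unsigned valid; uint32_t limit_scaled; };
    enum { SegAccessWOK = 0x04 };  // mirrors descriptor.h after this patch

    // Fast path only when (1) the cached write-access bit is set and
    // (2) the entire 4K page around dstOff fits under the segment limit;
    // returning false makes the caller fall back to the fully checked path.
    static bool fast_rep_store_allowed(const SegCacheView& seg, uint32_t dstOff)
    {
      if (!(seg.valid & SegAccessWOK)) return false;
      if ((dstOff | 0xfff) > seg.limit_scaled) return false;
      return true;
    }
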