From 279c61dc6765d12e0517a4ce0a682d89c5f4c37f Mon Sep 17 00:00:00 2001
From: Stanislav Shwartsman
Date: Wed, 28 Mar 2012 21:11:19 +0000
Subject: [PATCH] updated + fixed instrumentation example for instr histogram,
 code cleanup in the cpu

---
 bochs/cpu/access32.cc                   | 40 ++++++++++++-------------
 bochs/cpu/access64.cc                   | 40 ++++++++++++-------------
 bochs/cpu/cpu.cc                        |  4 +--
 bochs/cpu/debugstuff.cc                 |  2 +-
 bochs/cpu/io.cc                         |  4 +--
 bochs/cpu/paging.cc                     |  2 ++
 bochs/cpu/proc_ctrl.cc                  |  4 +--
 bochs/cpu/stack.cc                      |  2 +-
 bochs/cpu/string.cc                     | 18 +++++------
 bochs/cpu/vmexit.cc                     |  6 ++--
 bochs/instrument/example2/instrument.cc | 24 ++++++++++-----
 11 files changed, 78 insertions(+), 68 deletions(-)

diff --git a/bochs/cpu/access32.cc b/bochs/cpu/access32.cc
index 660ceecb9..2f92e2527 100644
--- a/bochs/cpu/access32.cc
+++ b/bochs/cpu/access32.cc
@@ -38,7 +38,7 @@ BX_CPU_C::write_virtual_byte_32(unsigned s, Bit32u offset, Bit8u data)
   if (seg->cache.valid & SegAccessWOK) {
     if (offset <= seg->cache.u.segment.limit_scaled) {
 accessOK:
-      laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
+      laddr = get_laddr32(s, offset);
       unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 0);
       Bit32u lpf = LPFOf(laddr);
       bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
@@ -82,7 +82,7 @@ BX_CPU_C::write_virtual_word_32(unsigned s, Bit32u offset, Bit16u data)
   if (seg->cache.valid & SegAccessWOK) {
     if (offset < seg->cache.u.segment.limit_scaled) {
 accessOK:
-      laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
+      laddr = get_laddr32(s, offset);
       unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 1);
 #if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
       Bit32u lpf = AlignedAccessLPFOf(laddr, (1 & BX_CPU_THIS_PTR alignment_check_mask));
@@ -140,7 +140,7 @@ BX_CPU_C::write_virtual_dword_32(unsigned s, Bit32u offset, Bit32u data)
   if (seg->cache.valid & SegAccessWOK) {
     if (offset < (seg->cache.u.segment.limit_scaled-2)) {
 accessOK:
-      laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
+      laddr = get_laddr32(s, offset);
       unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 3);
 #if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
       Bit32u lpf = AlignedAccessLPFOf(laddr, (3 & BX_CPU_THIS_PTR alignment_check_mask));
@@ -198,7 +198,7 @@ BX_CPU_C::write_virtual_qword_32(unsigned s, Bit32u offset, Bit64u data)
   if (seg->cache.valid & SegAccessWOK) {
     if (offset <= (seg->cache.u.segment.limit_scaled-7)) {
 accessOK:
-      laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
+      laddr = get_laddr32(s, offset);
       unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 7);
 #if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
       Bit32u lpf = AlignedAccessLPFOf(laddr, (7 & BX_CPU_THIS_PTR alignment_check_mask));
@@ -258,7 +258,7 @@ BX_CPU_C::write_virtual_dqword_32(unsigned s, Bit32u offset, const BxPackedXmmRe
   if (seg->cache.valid & SegAccessWOK) {
     if (offset <= (seg->cache.u.segment.limit_scaled-15)) {
 accessOK:
-      laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
+      laddr = get_laddr32(s, offset);
       unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 15);
       Bit32u lpf = LPFOf(laddr);
       bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
@@ -300,7 +300,7 @@ BX_CPU_C::write_virtual_dqword_aligned_32(unsigned s, Bit32u offset, const BxPac
 
   BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);
 
-  Bit32u laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
+  Bit32u laddr = get_laddr32(s, offset);
   // must check alignment here because #GP on misaligned access is higher
   // priority than other segment related faults
   if (laddr & 15) {
@@ -359,7 +359,7 @@ void BX_CPU_C::write_virtual_dword_vector_32(unsigned s, Bit32u offset, unsigned
if (seg->cache.valid & SegAccessWOK) { if (offset < (seg->cache.u.segment.limit_scaled-len)) { accessOK: - laddr = BX_CPU_THIS_PTR get_laddr32(s, offset); + laddr = get_laddr32(s, offset); unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, len-1); Bit32u lpf = LPFOf(laddr); bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex]; @@ -405,7 +405,7 @@ void BX_CPU_C::write_virtual_dword_vector_aligned_32(unsigned s, Bit32u offset, BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64); - Bit32u laddr = BX_CPU_THIS_PTR get_laddr32(s, offset); + Bit32u laddr = get_laddr32(s, offset); // must check alignment here because #GP on misaligned access is higher // priority than other segment related faults if (laddr & (len-1)) { @@ -467,7 +467,7 @@ BX_CPU_C::read_virtual_byte_32(unsigned s, Bit32u offset) if (seg->cache.valid & SegAccessROK) { if (offset <= seg->cache.u.segment.limit_scaled) { accessOK: - laddr = BX_CPU_THIS_PTR get_laddr32(s, offset); + laddr = get_laddr32(s, offset); unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 0); Bit32u lpf = LPFOf(laddr); bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex]; @@ -511,7 +511,7 @@ BX_CPU_C::read_virtual_word_32(unsigned s, Bit32u offset) if (seg->cache.valid & SegAccessROK) { if (offset < seg->cache.u.segment.limit_scaled) { accessOK: - laddr = BX_CPU_THIS_PTR get_laddr32(s, offset); + laddr = get_laddr32(s, offset); unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 1); #if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4 Bit32u lpf = AlignedAccessLPFOf(laddr, (1 & BX_CPU_THIS_PTR alignment_check_mask)); @@ -568,7 +568,7 @@ BX_CPU_C::read_virtual_dword_32(unsigned s, Bit32u offset) if (seg->cache.valid & SegAccessROK) { if (offset < (seg->cache.u.segment.limit_scaled-2)) { accessOK: - laddr = BX_CPU_THIS_PTR get_laddr32(s, offset); + laddr = get_laddr32(s, offset); unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 3); #if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4 Bit32u lpf = AlignedAccessLPFOf(laddr, (3 & BX_CPU_THIS_PTR alignment_check_mask)); @@ -625,7 +625,7 @@ BX_CPU_C::read_virtual_qword_32(unsigned s, Bit32u offset) if (seg->cache.valid & SegAccessROK) { if (offset <= (seg->cache.u.segment.limit_scaled-7)) { accessOK: - laddr = BX_CPU_THIS_PTR get_laddr32(s, offset); + laddr = get_laddr32(s, offset); unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 7); #if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4 Bit32u lpf = AlignedAccessLPFOf(laddr, (7 & BX_CPU_THIS_PTR alignment_check_mask)); @@ -683,7 +683,7 @@ BX_CPU_C::read_virtual_dqword_32(unsigned s, Bit32u offset, BxPackedXmmRegister if (seg->cache.valid & SegAccessROK) { if (offset <= (seg->cache.u.segment.limit_scaled-15)) { accessOK: - laddr = BX_CPU_THIS_PTR get_laddr32(s, offset); + laddr = get_laddr32(s, offset); unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 15); Bit32u lpf = LPFOf(laddr); bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex]; @@ -722,7 +722,7 @@ BX_CPU_C::read_virtual_dqword_aligned_32(unsigned s, Bit32u offset, BxPackedXmmR BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64); - Bit32u laddr = BX_CPU_THIS_PTR get_laddr32(s, offset); + Bit32u laddr = get_laddr32(s, offset); // must check alignment here because #GP on misaligned access is higher // priority than other segment related faults if (laddr & 15) { @@ -779,7 +779,7 @@ void BX_CPU_C::read_virtual_dword_vector_32(unsigned s, Bit32u offset, unsigned if (seg->cache.valid & SegAccessROK) { if (offset < (seg->cache.u.segment.limit_scaled-len)) { accessOK: - laddr = BX_CPU_THIS_PTR get_laddr32(s, offset); + laddr = 
get_laddr32(s, offset); unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, len-1); Bit32u lpf = LPFOf(laddr); bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex]; @@ -822,7 +822,7 @@ void BX_CPU_C::read_virtual_dword_vector_aligned_32(unsigned s, Bit32u offset, u BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64); - Bit32u laddr = BX_CPU_THIS_PTR get_laddr32(s, offset); + Bit32u laddr = get_laddr32(s, offset); // must check alignment here because #GP on misaligned access is higher // priority than other segment related faults if (laddr & (len-1)) { @@ -887,7 +887,7 @@ BX_CPU_C::read_RMW_virtual_byte_32(unsigned s, Bit32u offset) if (seg->cache.valid & SegAccessWOK) { if (offset <= seg->cache.u.segment.limit_scaled) { accessOK: - laddr = BX_CPU_THIS_PTR get_laddr32(s, offset); + laddr = get_laddr32(s, offset); unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 0); Bit32u lpf = LPFOf(laddr); bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex]; @@ -934,7 +934,7 @@ BX_CPU_C::read_RMW_virtual_word_32(unsigned s, Bit32u offset) if (seg->cache.valid & SegAccessWOK) { if (offset < seg->cache.u.segment.limit_scaled) { accessOK: - laddr = BX_CPU_THIS_PTR get_laddr32(s, offset); + laddr = get_laddr32(s, offset); unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 1); #if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4 Bit32u lpf = AlignedAccessLPFOf(laddr, (1 & BX_CPU_THIS_PTR alignment_check_mask)); @@ -995,7 +995,7 @@ BX_CPU_C::read_RMW_virtual_dword_32(unsigned s, Bit32u offset) if (seg->cache.valid & SegAccessWOK) { if (offset < (seg->cache.u.segment.limit_scaled-2)) { accessOK: - laddr = BX_CPU_THIS_PTR get_laddr32(s, offset); + laddr = get_laddr32(s, offset); unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 3); #if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4 Bit32u lpf = AlignedAccessLPFOf(laddr, (3 & BX_CPU_THIS_PTR alignment_check_mask)); @@ -1056,7 +1056,7 @@ BX_CPU_C::read_RMW_virtual_qword_32(unsigned s, Bit32u offset) if (seg->cache.valid & SegAccessWOK) { if (offset <= (seg->cache.u.segment.limit_scaled-7)) { accessOK: - laddr = BX_CPU_THIS_PTR get_laddr32(s, offset); + laddr = get_laddr32(s, offset); unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 7); #if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4 Bit32u lpf = AlignedAccessLPFOf(laddr, (7 & BX_CPU_THIS_PTR alignment_check_mask)); diff --git a/bochs/cpu/access64.cc b/bochs/cpu/access64.cc index fdb95630f..1bbe78ba9 100644 --- a/bochs/cpu/access64.cc +++ b/bochs/cpu/access64.cc @@ -35,7 +35,7 @@ BX_CPU_C::write_virtual_byte_64(unsigned s, Bit64u offset, Bit8u data) BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 1, BX_WRITE); - Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset); + Bit64u laddr = get_laddr64(s, offset); unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 0); Bit64u lpf = LPFOf(laddr); bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex]; @@ -69,7 +69,7 @@ BX_CPU_C::write_virtual_word_64(unsigned s, Bit64u offset, Bit16u data) BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 2, BX_WRITE); - Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset); + Bit64u laddr = get_laddr64(s, offset); unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 1); #if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4 Bit64u lpf = AlignedAccessLPFOf(laddr, (1 & BX_CPU_THIS_PTR alignment_check_mask)); @@ -121,7 +121,7 @@ BX_CPU_C::write_virtual_dword_64(unsigned s, Bit64u offset, Bit32u data) BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 4, BX_WRITE); - Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset); + Bit64u laddr = get_laddr64(s, offset); unsigned 
tlbIndex = BX_TLB_INDEX_OF(laddr, 3); #if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4 Bit64u lpf = AlignedAccessLPFOf(laddr, (3 & BX_CPU_THIS_PTR alignment_check_mask)); @@ -173,7 +173,7 @@ BX_CPU_C::write_virtual_qword_64(unsigned s, Bit64u offset, Bit64u data) BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 8, BX_WRITE); - Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset); + Bit64u laddr = get_laddr64(s, offset); unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 7); #if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4 Bit64u lpf = AlignedAccessLPFOf(laddr, (7 & BX_CPU_THIS_PTR alignment_check_mask)); @@ -225,7 +225,7 @@ BX_CPU_C::write_virtual_dqword_64(unsigned s, Bit64u offset, const BxPackedXmmRe BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 16, BX_WRITE); - Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset); + Bit64u laddr = get_laddr64(s, offset); unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 15); Bit64u lpf = LPFOf(laddr); bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex]; @@ -260,7 +260,7 @@ BX_CPU_C::write_virtual_dqword_aligned_64(unsigned s, Bit64u offset, const BxPac BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 16, BX_WRITE); - Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset); + Bit64u laddr = get_laddr64(s, offset); unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 0); Bit64u lpf = AlignedAccessLPFOf(laddr, 15); bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex]; @@ -304,7 +304,7 @@ void BX_CPU_C::write_virtual_dword_vector_64(unsigned s, Bit64u offset, unsigned BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, len, BX_WRITE); - Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset); + Bit64u laddr = get_laddr64(s, offset); unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, len-1); Bit64u lpf = LPFOf(laddr); bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex]; @@ -343,7 +343,7 @@ void BX_CPU_C::write_virtual_dword_vector_aligned_64(unsigned s, Bit64u offset, BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, len, BX_WRITE); - Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset); + Bit64u laddr = get_laddr64(s, offset); unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 0); Bit64u lpf = AlignedAccessLPFOf(laddr, len-1); bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex]; @@ -387,7 +387,7 @@ BX_CPU_C::read_virtual_byte_64(unsigned s, Bit64u offset) Bit8u data; BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 1, BX_READ); - Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset); + Bit64u laddr = get_laddr64(s, offset); unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 0); Bit64u lpf = LPFOf(laddr); bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex]; @@ -420,7 +420,7 @@ BX_CPU_C::read_virtual_word_64(unsigned s, Bit64u offset) Bit16u data; BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 2, BX_READ); - Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset); + Bit64u laddr = get_laddr64(s, offset); unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 1); #if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4 Bit64u lpf = AlignedAccessLPFOf(laddr, (1 & BX_CPU_THIS_PTR alignment_check_mask)); @@ -471,7 +471,7 @@ BX_CPU_C::read_virtual_dword_64(unsigned s, Bit64u offset) Bit32u data; BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 4, BX_READ); - Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset); + Bit64u laddr = get_laddr64(s, offset); unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 3); #if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4 Bit64u lpf = AlignedAccessLPFOf(laddr, (3 & BX_CPU_THIS_PTR alignment_check_mask)); @@ -522,7 +522,7 @@ BX_CPU_C::read_virtual_qword_64(unsigned 
s, Bit64u offset) Bit64u data; BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 8, BX_READ); - Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset); + Bit64u laddr = get_laddr64(s, offset); unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 7); #if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4 Bit64u lpf = AlignedAccessLPFOf(laddr, (7 & BX_CPU_THIS_PTR alignment_check_mask)); @@ -572,7 +572,7 @@ BX_CPU_C::read_virtual_dqword_64(unsigned s, Bit64u offset, BxPackedXmmRegister BX_ASSERT(BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64); BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 16, BX_READ); - Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset); + Bit64u laddr = get_laddr64(s, offset); unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 15); Bit64u lpf = LPFOf(laddr); bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex]; @@ -604,7 +604,7 @@ BX_CPU_C::read_virtual_dqword_aligned_64(unsigned s, Bit64u offset, BxPackedXmmR BX_ASSERT(BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64); BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 16, BX_READ); - Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset); + Bit64u laddr = get_laddr64(s, offset); unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 0); Bit64u lpf = AlignedAccessLPFOf(laddr, 15); bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex]; @@ -645,7 +645,7 @@ void BX_CPU_C::read_virtual_dword_vector_64(unsigned s, Bit64u offset, unsigned BX_ASSERT(BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64); BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, len, BX_READ); - Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset); + Bit64u laddr = get_laddr64(s, offset); unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, len-1); Bit64u lpf = LPFOf(laddr); bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex]; @@ -681,7 +681,7 @@ void BX_CPU_C::read_virtual_dword_vector_aligned_64(unsigned s, Bit64u offset, u BX_ASSERT(BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64); BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, len, BX_READ); - Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset); + Bit64u laddr = get_laddr64(s, offset); unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 0); Bit64u lpf = AlignedAccessLPFOf(laddr, len-1); bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex]; @@ -728,7 +728,7 @@ BX_CPU_C::read_RMW_virtual_byte_64(unsigned s, Bit64u offset) Bit8u data; BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 1, BX_RW); - Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset); + Bit64u laddr = get_laddr64(s, offset); unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 0); Bit64u lpf = LPFOf(laddr); bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex]; @@ -765,7 +765,7 @@ BX_CPU_C::read_RMW_virtual_word_64(unsigned s, Bit64u offset) Bit16u data; BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 2, BX_RW); - Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset); + Bit64u laddr = get_laddr64(s, offset); unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 1); #if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4 Bit64u lpf = AlignedAccessLPFOf(laddr, (1 & BX_CPU_THIS_PTR alignment_check_mask)); @@ -820,7 +820,7 @@ BX_CPU_C::read_RMW_virtual_dword_64(unsigned s, Bit64u offset) Bit32u data; BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 4, BX_RW); - Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset); + Bit64u laddr = get_laddr64(s, offset); unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 3); #if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4 Bit64u lpf = AlignedAccessLPFOf(laddr, (3 & BX_CPU_THIS_PTR alignment_check_mask)); @@ -875,7 +875,7 @@ BX_CPU_C::read_RMW_virtual_qword_64(unsigned s, 
Bit64u offset) Bit64u data; BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 8, BX_RW); - Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset); + Bit64u laddr = get_laddr64(s, offset); unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 7); #if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4 Bit64u lpf = AlignedAccessLPFOf(laddr, (7 & BX_CPU_THIS_PTR alignment_check_mask)); diff --git a/bochs/cpu/cpu.cc b/bochs/cpu/cpu.cc index 287818c21..d956c2638 100644 --- a/bochs/cpu/cpu.cc +++ b/bochs/cpu/cpu.cc @@ -506,7 +506,7 @@ void BX_CPU_C::prefetch(void) #endif { BX_CLEAR_64BIT_HIGH(BX_64BIT_REG_RIP); /* avoid 32-bit EIP wrap */ - laddr = BX_CPU_THIS_PTR get_laddr32(BX_SEG_REG_CS, EIP); + laddr = get_laddr32(BX_SEG_REG_CS, EIP); pageOffset = PAGE_OFFSET(laddr); // Calculate RIP at the beginning of the page. @@ -590,7 +590,7 @@ bx_bool BX_CPU_C::dbg_instruction_epilog(void) BX_CPU_THIS_PTR guard_found.cs = cs; BX_CPU_THIS_PTR guard_found.eip = debug_eip; - BX_CPU_THIS_PTR guard_found.laddr = BX_CPU_THIS_PTR get_laddr(BX_SEG_REG_CS, debug_eip); + BX_CPU_THIS_PTR guard_found.laddr = get_laddr(BX_SEG_REG_CS, debug_eip); BX_CPU_THIS_PTR guard_found.code_32_64 = BX_CPU_THIS_PTR fetchModeMask; // diff --git a/bochs/cpu/debugstuff.cc b/bochs/cpu/debugstuff.cc index 37497636a..08cc83259 100644 --- a/bochs/cpu/debugstuff.cc +++ b/bochs/cpu/debugstuff.cc @@ -43,7 +43,7 @@ void BX_CPU_C::debug_disasm_instruction(bx_address offset) static disassembler bx_disassemble; unsigned remainsInPage = 0x1000 - PAGE_OFFSET(offset); - bx_bool valid = dbg_xlate_linear2phy(BX_CPU_THIS_PTR get_laddr(BX_SEG_REG_CS, offset), &phy_addr); + bx_bool valid = dbg_xlate_linear2phy(get_laddr(BX_SEG_REG_CS, offset), &phy_addr); if (valid) { BX_MEM(0)->dbg_fetch_mem(BX_CPU_THIS, phy_addr, 16, instr_buf); unsigned isize = bx_disassemble.disasm( diff --git a/bochs/cpu/io.cc b/bochs/cpu/io.cc index 03a158d83..322e7c370 100644 --- a/bochs/cpu/io.cc +++ b/bochs/cpu/io.cc @@ -47,7 +47,7 @@ Bit32u BX_CPU_C::FastRepINSW(bxInstruction_c *i, bx_address dstOff, Bit16u port, if ((dstOff | 0xfff) > dstSegPtr->cache.u.segment.limit_scaled) return 0; - bx_address laddrDst = BX_CPU_THIS_PTR get_laddr(BX_SEG_REG_ES, dstOff); + bx_address laddrDst = get_laddr32(BX_SEG_REG_ES, dstOff); // check that the address is word aligned if (laddrDst & 1) return 0; @@ -120,7 +120,7 @@ Bit32u BX_CPU_C::FastRepOUTSW(bxInstruction_c *i, unsigned srcSeg, bx_address sr if ((srcOff | 0xfff) > srcSegPtr->cache.u.segment.limit_scaled) return 0; - bx_address laddrSrc = BX_CPU_THIS_PTR get_laddr(srcSeg, srcOff); + bx_address laddrSrc = get_laddr32(srcSeg, srcOff); // check that the address is word aligned if (laddrSrc & 1) return 0; diff --git a/bochs/cpu/paging.cc b/bochs/cpu/paging.cc index 6bab59518..31d677011 100644 --- a/bochs/cpu/paging.cc +++ b/bochs/cpu/paging.cc @@ -1435,6 +1435,8 @@ bx_phy_address BX_CPU_C::nested_walk(bx_phy_address guest_paddr, unsigned rw, bx { SVM_HOST_STATE *host_state = &BX_CPU_THIS_PTR vmcb.host_state; + BX_DEBUG(("Nested walk for guest paddr 0x" FMT_ADDRX, guest_paddr)); + if (host_state->efer.get_LMA()) return nested_walk_long_mode(guest_paddr, rw, is_page_walk); else if (host_state->cr4.get_PAE()) diff --git a/bochs/cpu/proc_ctrl.cc b/bochs/cpu/proc_ctrl.cc index 0c688eb78..08814a9ba 100644 --- a/bochs/cpu/proc_ctrl.cc +++ b/bochs/cpu/proc_ctrl.cc @@ -260,7 +260,7 @@ BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::CLFLUSH(bxInstruction_c *i) bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[i->seg()]; bx_address eaddr = 
BX_CPU_CALL_METHODR(i->ResolveModrm, (i)); - bx_address laddr = BX_CPU_THIS_PTR get_laddr(i->seg(), eaddr); + bx_address laddr = get_laddr(i->seg(), eaddr); #if BX_SUPPORT_X86_64 if (BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64) { @@ -679,7 +679,7 @@ BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::MONITOR(bxInstruction_c *i) bx_address offset = RAX & i->asize_mask(); // set MONITOR - bx_address laddr = BX_CPU_THIS_PTR get_laddr(i->seg(), offset); + bx_address laddr = get_laddr(i->seg(), offset); #if BX_SUPPORT_X86_64 if (BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64) { diff --git a/bochs/cpu/stack.cc b/bochs/cpu/stack.cc index d001bc9d1..b8e940368 100755 --- a/bochs/cpu/stack.cc +++ b/bochs/cpu/stack.cc @@ -51,7 +51,7 @@ void BX_CPP_AttrRegparmN(2) BX_CPU_C::stackPrefetch(bx_address offset, unsigned else #endif { - laddr = (Bit32u) (offset + BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.base); + laddr = get_laddr32(BX_SEG_REG_SS, offset); pageOffset = PAGE_OFFSET(laddr); if (pageOffset + len >= 4096) // don't care for page split accesses return; diff --git a/bochs/cpu/string.cc b/bochs/cpu/string.cc index 747066f65..7f64749d2 100644 --- a/bochs/cpu/string.cc +++ b/bochs/cpu/string.cc @@ -50,12 +50,12 @@ Bit32u BX_CPU_C::FastRepMOVSB(bxInstruction_c *i, unsigned srcSeg, bx_address sr if ((dstOff | 0xfff) > dstSegPtr->cache.u.segment.limit_scaled) return 0; - laddrSrc = BX_CPU_THIS_PTR get_laddr(srcSeg, srcOff); + laddrSrc = get_laddr32(srcSeg, srcOff); hostAddrSrc = v2h_read_byte(laddrSrc, BX_CPU_THIS_PTR user_pl); if (! hostAddrSrc) return 0; - laddrDst = BX_CPU_THIS_PTR get_laddr(dstSeg, dstOff); + laddrDst = get_laddr32(dstSeg, dstOff); hostAddrDst = v2h_write_byte(laddrDst, BX_CPU_THIS_PTR user_pl); // Check that native host access was not vetoed for that page @@ -120,12 +120,12 @@ Bit32u BX_CPU_C::FastRepMOVSW(bxInstruction_c *i, unsigned srcSeg, bx_address sr if ((dstOff | 0xfff) > dstSegPtr->cache.u.segment.limit_scaled) return 0; - laddrSrc = BX_CPU_THIS_PTR get_laddr(srcSeg, srcOff); + laddrSrc = get_laddr32(srcSeg, srcOff); hostAddrSrc = v2h_read_byte(laddrSrc, BX_CPU_THIS_PTR user_pl); if (! hostAddrSrc) return 0; - laddrDst = BX_CPU_THIS_PTR get_laddr(dstSeg, dstOff); + laddrDst = get_laddr32(dstSeg, dstOff); hostAddrDst = v2h_write_byte(laddrDst, BX_CPU_THIS_PTR user_pl); // Check that native host access was not vetoed for that page @@ -193,12 +193,12 @@ Bit32u BX_CPU_C::FastRepMOVSD(bxInstruction_c *i, unsigned srcSeg, bx_address sr if ((dstOff | 0xfff) > dstSegPtr->cache.u.segment.limit_scaled) return 0; - laddrSrc = BX_CPU_THIS_PTR get_laddr(srcSeg, srcOff); + laddrSrc = get_laddr32(srcSeg, srcOff); hostAddrSrc = v2h_read_byte(laddrSrc, BX_CPU_THIS_PTR user_pl); if (! 
hostAddrSrc) return 0; - laddrDst = BX_CPU_THIS_PTR get_laddr(dstSeg, dstOff); + laddrDst = get_laddr32(dstSeg, dstOff); hostAddrDst = v2h_write_byte(laddrDst, BX_CPU_THIS_PTR user_pl); // Check that native host access was not vetoed for that page @@ -260,7 +260,7 @@ Bit32u BX_CPU_C::FastRepSTOSB(bxInstruction_c *i, unsigned dstSeg, bx_address ds if ((dstOff | 0xfff) > dstSegPtr->cache.u.segment.limit_scaled) return 0; - laddrDst = BX_CPU_THIS_PTR get_laddr(dstSeg, dstOff); + laddrDst = get_laddr32(dstSeg, dstOff); hostAddrDst = v2h_write_byte(laddrDst, BX_CPU_THIS_PTR user_pl); // Check that native host access was not vetoed for that page @@ -314,7 +314,7 @@ Bit32u BX_CPU_C::FastRepSTOSW(bxInstruction_c *i, unsigned dstSeg, bx_address ds if ((dstOff | 0xfff) > dstSegPtr->cache.u.segment.limit_scaled) return 0; - laddrDst = BX_CPU_THIS_PTR get_laddr(dstSeg, dstOff); + laddrDst = get_laddr32(dstSeg, dstOff); hostAddrDst = v2h_write_byte(laddrDst, BX_CPU_THIS_PTR user_pl); // Check that native host access was not vetoed for that page @@ -370,7 +370,7 @@ Bit32u BX_CPU_C::FastRepSTOSD(bxInstruction_c *i, unsigned dstSeg, bx_address ds if ((dstOff | 0xfff) > dstSegPtr->cache.u.segment.limit_scaled) return 0; - laddrDst = BX_CPU_THIS_PTR get_laddr(dstSeg, dstOff); + laddrDst = get_laddr32(dstSeg, dstOff); hostAddrDst = v2h_write_byte(laddrDst, BX_CPU_THIS_PTR user_pl); // Check that native host access was not vetoed for that page diff --git a/bochs/cpu/vmexit.cc b/bochs/cpu/vmexit.cc index 25e7bd86f..46871dfe2 100644 --- a/bochs/cpu/vmexit.cc +++ b/bochs/cpu/vmexit.cc @@ -454,9 +454,9 @@ void BX_CPP_AttrRegparmN(3) BX_CPU_C::VMexit_IO(bxInstruction_c *i, unsigned por bx_address asize_mask = (bx_address) i->asize_mask(), laddr; if (qualification & VMX_VMEXIT_IO_PORTIN) - laddr = BX_CPU_THIS_PTR get_laddr(BX_SEG_REG_ES, RDI & asize_mask); + laddr = get_laddr(BX_SEG_REG_ES, RDI & asize_mask); else // PORTOUT - laddr = BX_CPU_THIS_PTR get_laddr(i->seg(), RSI & asize_mask); + laddr = get_laddr(i->seg(), RSI & asize_mask); VMwrite_natural(VMCS_GUEST_LINEAR_ADDR, laddr); @@ -530,7 +530,7 @@ Bit32u BX_CPP_AttrRegparmN(2) BX_CPU_C::VMexit_LMSW(bxInstruction_c *i, Bit32u m qualification |= msw << 16; if (! 
i->modC0()) {
     qualification |= (1 << 6); // memory operand
-    VMwrite_natural(VMCS_GUEST_LINEAR_ADDR, BX_CPU_THIS_PTR get_laddr(i->seg(), RMAddr(i)));
+    VMwrite_natural(VMCS_GUEST_LINEAR_ADDR, get_laddr(i->seg(), RMAddr(i)));
   }
 
   VMexit(i, VMX_VMEXIT_CR_ACCESS, qualification);
diff --git a/bochs/instrument/example2/instrument.cc b/bochs/instrument/example2/instrument.cc
index d79792073..7b5053e0c 100755
--- a/bochs/instrument/example2/instrument.cc
+++ b/bochs/instrument/example2/instrument.cc
@@ -25,9 +25,11 @@
 #include "bochs.h"
 #include "cpu/cpu.h"
 
-struct bx_instr_ia_stats {
+#define BX_IA_STATS_ENTRIES (BX_IA_LAST*2) /* /r and /m form */
+
+static struct bx_instr_ia_stats {
   bx_bool active;
-  Bit32u ia_cnt[BX_IA_LAST];
+  Bit32u ia_cnt[BX_IA_STATS_ENTRIES];
   Bit32u total_cnt;
   Bit32u interrupts;
   Bit32u exceptions;
@@ -53,7 +55,7 @@ void bx_instr_reset(unsigned cpu, unsigned type)
 
   ia_stats[cpu].active = 1;
   ia_stats[cpu].total_cnt = 0;
-  for(int n=0; n < BX_IA_LAST; n++)
+  for(int n=0; n < BX_IA_STATS_ENTRIES; n++)
     ia_stats[cpu].ia_cnt[n] = 0;
 
   ia_stats[cpu].interrupts = ia_stats[cpu].exceptions = 0;
@@ -79,18 +81,24 @@ void bx_instr_hwinterrupt(unsigned cpu, unsigned vector, Bit16u cs, bx_address e
 void bx_instr_before_execution(unsigned cpu, bxInstruction_c *i)
 {
   if(ia_stats[cpu].active) {
-    ia_stats[cpu].ia_cnt[i->getIaOpcode()]++;
+    ia_stats[cpu].ia_cnt[i->getIaOpcode() * 2 + !!i->modC0()]++;
     ia_stats[cpu].total_cnt++;
 
     if (ia_stats[cpu].total_cnt > IA_CNT_DUMP_THRESHOLD) {
       printf("Dump IA stats for CPU %d\n", cpu);
       printf("----------------------------------------------------------\n");
       printf("Interrupts: %d, Exceptions: %d\n", ia_stats[cpu].interrupts, ia_stats[cpu].exceptions);
-      for (int n=0;n < BX_IA_LAST; n++) {
-        if (ia_stats[cpu].ia_cnt[n] > 0) {
-          printf("%s: %f%%\n", get_bx_opcode_name(n), ia_stats[cpu].ia_cnt[n] * 100.0 / ia_stats[cpu].total_cnt);
-          ia_stats[cpu].ia_cnt[n] = 0;
+      while(1) {
+        Bit32u max = 0, max_index = 0;
+        for (int n=0;n < BX_IA_STATS_ENTRIES; n++) {
+          if (ia_stats[cpu].ia_cnt[n] > max) {
+            max = ia_stats[cpu].ia_cnt[n];
+            max_index = n;
+          }
         }
+        if (max == 0) break;
+        printf("%s /%c: %f%%\n", get_bx_opcode_name(max_index/2), (max_index & 1) ? 'm' : 'r', ia_stats[cpu].ia_cnt[max_index] * 100.0f / ia_stats[cpu].total_cnt);
+        ia_stats[cpu].ia_cnt[max_index] = 0;
       }
       ia_stats[cpu].interrupts = ia_stats[cpu].exceptions = ia_stats[cpu].total_cnt = 0;
     }
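
Note on the example2 change above: each opcode now gets two counters (getIaOpcode()*2 selects the opcode, plus one when modC0() is set, i.e. one slot per /r and /m form), and the dump loop was rewritten to print the histogram in descending order of frequency. Instead of walking the array once in opcode order, it repeatedly scans the whole counter array for the largest remaining entry, prints it, and zeroes it. A standalone sketch of that selection-style dump follows; opcode_name[], record() and dump_sorted() are hypothetical stand-ins for the Bochs BX_IA_* enumeration, get_bx_opcode_name() and the instrumentation callbacks, not part of the patch.

  // Sketch only: a tiny standalone model of the sorted histogram dump used in
  // example2.  opcode_name[] / record() / dump_sorted() are hypothetical; Bochs
  // indexes its counters by the BX_IA_* enumeration (two slots per opcode) and
  // prints names with get_bx_opcode_name().
  #include <cstdio>

  static const char *opcode_name[] = { "MOV", "ADD", "JMP" };
  static const unsigned N_OPCODES = sizeof(opcode_name) / sizeof(opcode_name[0]);

  static unsigned ia_cnt[N_OPCODES];   // one counter per opcode in this sketch
  static unsigned total_cnt = 0;

  static void record(unsigned opcode)  // called once per simulated instruction
  {
    ia_cnt[opcode]++;
    total_cnt++;
  }

  static void dump_sorted(void)
  {
    // Repeatedly pick the largest remaining counter, print it, then clear it,
    // so the output comes out sorted by frequency without a separate sort pass.
    while (1) {
      unsigned max = 0, max_index = 0;
      for (unsigned n = 0; n < N_OPCODES; n++) {
        if (ia_cnt[n] > max) { max = ia_cnt[n]; max_index = n; }
      }
      if (max == 0) break;
      printf("%s: %.2f%%\n", opcode_name[max_index], max * 100.0 / total_cnt);
      ia_cnt[max_index] = 0;
    }
    total_cnt = 0;
  }

  int main()
  {
    record(0); record(0); record(2); record(1); record(0);
    dump_sorted();   // prints MOV (60%), then ADD and JMP (20% each)
    return 0;
  }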
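
Note on the cpu-side cleanup: get_laddr(), get_laddr32() and get_laddr64() are BX_CPU_C member functions, so the BX_CPU_THIS_PTR qualifier in front of them inside other BX_CPU_C methods was redundant and is dropped; a few 32-bit-only fast paths (io.cc, stack.cc, string.cc) now call get_laddr32() directly, making the 32-bit linear-address computation explicit. A rough model of the relationship is sketched below; it is simplified and uses hypothetical names, not the real Bochs declarations, which also depend on the SMF build configuration.

  // Rough model only: shows why "BX_CPU_THIS_PTR get_laddr32(s, offset)" and a
  // plain "get_laddr32(s, offset)" are the same call inside a member function.
  // cpu_model, seg_base[] and write_probe() are hypothetical; the real BX_CPU_C
  // keeps segment bases in sregs[s].cache and BX_CPU_THIS_PTR is a build-time
  // macro (typically expanding to "this->").
  #include <cstdint>

  class cpu_model {
    uint32_t seg_base[6] = {};                  // hypothetical per-segment base
  public:
    uint32_t get_laddr32(unsigned s, uint32_t offset) const {
      return seg_base[s] + offset;              // linear address, wraps at 32 bits
    }
    uint32_t write_probe(unsigned s, uint32_t offset) const {
      // before the cleanup: this->get_laddr32(s, offset);
      // after the cleanup:  get_laddr32(s, offset);  -- identical behavior
      return get_laddr32(s, offset);
    }
  };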