updated + fixed instrumentation example for instr histogram, code cleanup in the cpu
commit 279c61dc67
parent fa03e8b925

@@ -38,7 +38,7 @@ BX_CPU_C::write_virtual_byte_32(unsigned s, Bit32u offset, Bit8u data)
   if (seg->cache.valid & SegAccessWOK) {
     if (offset <= seg->cache.u.segment.limit_scaled) {
 accessOK:
-      laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
+      laddr = get_laddr32(s, offset);
       unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 0);
       Bit32u lpf = LPFOf(laddr);
       bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
@@ -82,7 +82,7 @@ BX_CPU_C::write_virtual_word_32(unsigned s, Bit32u offset, Bit16u data)
   if (seg->cache.valid & SegAccessWOK) {
     if (offset < seg->cache.u.segment.limit_scaled) {
 accessOK:
-      laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
+      laddr = get_laddr32(s, offset);
       unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 1);
 #if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
       Bit32u lpf = AlignedAccessLPFOf(laddr, (1 & BX_CPU_THIS_PTR alignment_check_mask));
@@ -140,7 +140,7 @@ BX_CPU_C::write_virtual_dword_32(unsigned s, Bit32u offset, Bit32u data)
   if (seg->cache.valid & SegAccessWOK) {
     if (offset < (seg->cache.u.segment.limit_scaled-2)) {
 accessOK:
-      laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
+      laddr = get_laddr32(s, offset);
       unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 3);
 #if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
       Bit32u lpf = AlignedAccessLPFOf(laddr, (3 & BX_CPU_THIS_PTR alignment_check_mask));
@@ -198,7 +198,7 @@ BX_CPU_C::write_virtual_qword_32(unsigned s, Bit32u offset, Bit64u data)
   if (seg->cache.valid & SegAccessWOK) {
     if (offset <= (seg->cache.u.segment.limit_scaled-7)) {
 accessOK:
-      laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
+      laddr = get_laddr32(s, offset);
       unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 7);
 #if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
       Bit32u lpf = AlignedAccessLPFOf(laddr, (7 & BX_CPU_THIS_PTR alignment_check_mask));
@@ -258,7 +258,7 @@ BX_CPU_C::write_virtual_dqword_32(unsigned s, Bit32u offset, const BxPackedXmmRe
   if (seg->cache.valid & SegAccessWOK) {
     if (offset <= (seg->cache.u.segment.limit_scaled-15)) {
 accessOK:
-      laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
+      laddr = get_laddr32(s, offset);
       unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 15);
       Bit32u lpf = LPFOf(laddr);
       bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
@@ -300,7 +300,7 @@ BX_CPU_C::write_virtual_dqword_aligned_32(unsigned s, Bit32u offset, const BxPac
 
   BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);
 
-  Bit32u laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
+  Bit32u laddr = get_laddr32(s, offset);
   // must check alignment here because #GP on misaligned access is higher
   // priority than other segment related faults
   if (laddr & 15) {
@@ -359,7 +359,7 @@ void BX_CPU_C::write_virtual_dword_vector_32(unsigned s, Bit32u offset, unsigned
   if (seg->cache.valid & SegAccessWOK) {
     if (offset < (seg->cache.u.segment.limit_scaled-len)) {
 accessOK:
-      laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
+      laddr = get_laddr32(s, offset);
       unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, len-1);
       Bit32u lpf = LPFOf(laddr);
       bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
@@ -405,7 +405,7 @@ void BX_CPU_C::write_virtual_dword_vector_aligned_32(unsigned s, Bit32u offset,
 
   BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);
 
-  Bit32u laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
+  Bit32u laddr = get_laddr32(s, offset);
   // must check alignment here because #GP on misaligned access is higher
   // priority than other segment related faults
   if (laddr & (len-1)) {
@@ -467,7 +467,7 @@ BX_CPU_C::read_virtual_byte_32(unsigned s, Bit32u offset)
   if (seg->cache.valid & SegAccessROK) {
     if (offset <= seg->cache.u.segment.limit_scaled) {
 accessOK:
-      laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
+      laddr = get_laddr32(s, offset);
       unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 0);
       Bit32u lpf = LPFOf(laddr);
       bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
@@ -511,7 +511,7 @@ BX_CPU_C::read_virtual_word_32(unsigned s, Bit32u offset)
   if (seg->cache.valid & SegAccessROK) {
     if (offset < seg->cache.u.segment.limit_scaled) {
 accessOK:
-      laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
+      laddr = get_laddr32(s, offset);
       unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 1);
 #if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
       Bit32u lpf = AlignedAccessLPFOf(laddr, (1 & BX_CPU_THIS_PTR alignment_check_mask));
@@ -568,7 +568,7 @@ BX_CPU_C::read_virtual_dword_32(unsigned s, Bit32u offset)
   if (seg->cache.valid & SegAccessROK) {
     if (offset < (seg->cache.u.segment.limit_scaled-2)) {
 accessOK:
-      laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
+      laddr = get_laddr32(s, offset);
       unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 3);
 #if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
       Bit32u lpf = AlignedAccessLPFOf(laddr, (3 & BX_CPU_THIS_PTR alignment_check_mask));
@@ -625,7 +625,7 @@ BX_CPU_C::read_virtual_qword_32(unsigned s, Bit32u offset)
   if (seg->cache.valid & SegAccessROK) {
     if (offset <= (seg->cache.u.segment.limit_scaled-7)) {
 accessOK:
-      laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
+      laddr = get_laddr32(s, offset);
       unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 7);
 #if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
       Bit32u lpf = AlignedAccessLPFOf(laddr, (7 & BX_CPU_THIS_PTR alignment_check_mask));
@@ -683,7 +683,7 @@ BX_CPU_C::read_virtual_dqword_32(unsigned s, Bit32u offset, BxPackedXmmRegister
   if (seg->cache.valid & SegAccessROK) {
     if (offset <= (seg->cache.u.segment.limit_scaled-15)) {
 accessOK:
-      laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
+      laddr = get_laddr32(s, offset);
       unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 15);
       Bit32u lpf = LPFOf(laddr);
       bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
@@ -722,7 +722,7 @@ BX_CPU_C::read_virtual_dqword_aligned_32(unsigned s, Bit32u offset, BxPackedXmmR
 
   BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);
 
-  Bit32u laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
+  Bit32u laddr = get_laddr32(s, offset);
   // must check alignment here because #GP on misaligned access is higher
   // priority than other segment related faults
   if (laddr & 15) {
@@ -779,7 +779,7 @@ void BX_CPU_C::read_virtual_dword_vector_32(unsigned s, Bit32u offset, unsigned
   if (seg->cache.valid & SegAccessROK) {
     if (offset < (seg->cache.u.segment.limit_scaled-len)) {
 accessOK:
-      laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
+      laddr = get_laddr32(s, offset);
       unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, len-1);
       Bit32u lpf = LPFOf(laddr);
       bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
@@ -822,7 +822,7 @@ void BX_CPU_C::read_virtual_dword_vector_aligned_32(unsigned s, Bit32u offset, u
 
   BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);
 
-  Bit32u laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
+  Bit32u laddr = get_laddr32(s, offset);
   // must check alignment here because #GP on misaligned access is higher
   // priority than other segment related faults
   if (laddr & (len-1)) {
@@ -887,7 +887,7 @@ BX_CPU_C::read_RMW_virtual_byte_32(unsigned s, Bit32u offset)
   if (seg->cache.valid & SegAccessWOK) {
     if (offset <= seg->cache.u.segment.limit_scaled) {
 accessOK:
-      laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
+      laddr = get_laddr32(s, offset);
       unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 0);
       Bit32u lpf = LPFOf(laddr);
       bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
@@ -934,7 +934,7 @@ BX_CPU_C::read_RMW_virtual_word_32(unsigned s, Bit32u offset)
   if (seg->cache.valid & SegAccessWOK) {
     if (offset < seg->cache.u.segment.limit_scaled) {
 accessOK:
-      laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
+      laddr = get_laddr32(s, offset);
       unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 1);
 #if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
       Bit32u lpf = AlignedAccessLPFOf(laddr, (1 & BX_CPU_THIS_PTR alignment_check_mask));
@@ -995,7 +995,7 @@ BX_CPU_C::read_RMW_virtual_dword_32(unsigned s, Bit32u offset)
   if (seg->cache.valid & SegAccessWOK) {
     if (offset < (seg->cache.u.segment.limit_scaled-2)) {
 accessOK:
-      laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
+      laddr = get_laddr32(s, offset);
       unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 3);
 #if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
       Bit32u lpf = AlignedAccessLPFOf(laddr, (3 & BX_CPU_THIS_PTR alignment_check_mask));
@@ -1056,7 +1056,7 @@ BX_CPU_C::read_RMW_virtual_qword_32(unsigned s, Bit32u offset)
   if (seg->cache.valid & SegAccessWOK) {
     if (offset <= (seg->cache.u.segment.limit_scaled-7)) {
 accessOK:
-      laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
+      laddr = get_laddr32(s, offset);
       unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 7);
 #if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
       Bit32u lpf = AlignedAccessLPFOf(laddr, (7 & BX_CPU_THIS_PTR alignment_check_mask));
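
Note: the only change in each hunk above is dropping the explicit BX_CPU_THIS_PTR qualifier; get_laddr32() is itself a BX_CPU_C member, so inside these member functions the call already resolves against the current CPU object. A minimal standalone sketch of the computation such a helper performs (an illustrative assumption, not code from this commit):

#include <cstdint>

// Hypothetical stand-in for the segment-cache field the real helper reads.
struct SegCacheSketch { uint32_t base; };

// Sketch: a 32-bit linear address is the segment base plus the effective
// address (offset), truncated to 32 bits.
static inline uint32_t laddr32_sketch(const SegCacheSketch& seg, uint32_t offset)
{
  return static_cast<uint32_t>(seg.base + offset);
}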

@@ -35,7 +35,7 @@ BX_CPU_C::write_virtual_byte_64(unsigned s, Bit64u offset, Bit8u data)
 
   BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 1, BX_WRITE);
 
-  Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
+  Bit64u laddr = get_laddr64(s, offset);
   unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 0);
   Bit64u lpf = LPFOf(laddr);
   bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
@@ -69,7 +69,7 @@ BX_CPU_C::write_virtual_word_64(unsigned s, Bit64u offset, Bit16u data)
 
   BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 2, BX_WRITE);
 
-  Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
+  Bit64u laddr = get_laddr64(s, offset);
   unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 1);
 #if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
   Bit64u lpf = AlignedAccessLPFOf(laddr, (1 & BX_CPU_THIS_PTR alignment_check_mask));
@@ -121,7 +121,7 @@ BX_CPU_C::write_virtual_dword_64(unsigned s, Bit64u offset, Bit32u data)
 
   BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 4, BX_WRITE);
 
-  Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
+  Bit64u laddr = get_laddr64(s, offset);
   unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 3);
 #if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
   Bit64u lpf = AlignedAccessLPFOf(laddr, (3 & BX_CPU_THIS_PTR alignment_check_mask));
@@ -173,7 +173,7 @@ BX_CPU_C::write_virtual_qword_64(unsigned s, Bit64u offset, Bit64u data)
 
   BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 8, BX_WRITE);
 
-  Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
+  Bit64u laddr = get_laddr64(s, offset);
   unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 7);
 #if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
   Bit64u lpf = AlignedAccessLPFOf(laddr, (7 & BX_CPU_THIS_PTR alignment_check_mask));
@@ -225,7 +225,7 @@ BX_CPU_C::write_virtual_dqword_64(unsigned s, Bit64u offset, const BxPackedXmmRe
 
   BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 16, BX_WRITE);
 
-  Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
+  Bit64u laddr = get_laddr64(s, offset);
   unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 15);
   Bit64u lpf = LPFOf(laddr);
   bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
@@ -260,7 +260,7 @@ BX_CPU_C::write_virtual_dqword_aligned_64(unsigned s, Bit64u offset, const BxPac
 
   BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 16, BX_WRITE);
 
-  Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
+  Bit64u laddr = get_laddr64(s, offset);
   unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 0);
   Bit64u lpf = AlignedAccessLPFOf(laddr, 15);
   bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
@@ -304,7 +304,7 @@ void BX_CPU_C::write_virtual_dword_vector_64(unsigned s, Bit64u offset, unsigned
 
   BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, len, BX_WRITE);
 
-  Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
+  Bit64u laddr = get_laddr64(s, offset);
   unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, len-1);
   Bit64u lpf = LPFOf(laddr);
   bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
@@ -343,7 +343,7 @@ void BX_CPU_C::write_virtual_dword_vector_aligned_64(unsigned s, Bit64u offset,
 
   BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, len, BX_WRITE);
 
-  Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
+  Bit64u laddr = get_laddr64(s, offset);
   unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 0);
   Bit64u lpf = AlignedAccessLPFOf(laddr, len-1);
   bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
@@ -387,7 +387,7 @@ BX_CPU_C::read_virtual_byte_64(unsigned s, Bit64u offset)
   Bit8u data;
   BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 1, BX_READ);
 
-  Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
+  Bit64u laddr = get_laddr64(s, offset);
   unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 0);
   Bit64u lpf = LPFOf(laddr);
   bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
@@ -420,7 +420,7 @@ BX_CPU_C::read_virtual_word_64(unsigned s, Bit64u offset)
   Bit16u data;
   BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 2, BX_READ);
 
-  Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
+  Bit64u laddr = get_laddr64(s, offset);
   unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 1);
 #if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
   Bit64u lpf = AlignedAccessLPFOf(laddr, (1 & BX_CPU_THIS_PTR alignment_check_mask));
@@ -471,7 +471,7 @@ BX_CPU_C::read_virtual_dword_64(unsigned s, Bit64u offset)
   Bit32u data;
   BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 4, BX_READ);
 
-  Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
+  Bit64u laddr = get_laddr64(s, offset);
   unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 3);
 #if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
   Bit64u lpf = AlignedAccessLPFOf(laddr, (3 & BX_CPU_THIS_PTR alignment_check_mask));
@@ -522,7 +522,7 @@ BX_CPU_C::read_virtual_qword_64(unsigned s, Bit64u offset)
   Bit64u data;
   BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 8, BX_READ);
 
-  Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
+  Bit64u laddr = get_laddr64(s, offset);
   unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 7);
 #if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
   Bit64u lpf = AlignedAccessLPFOf(laddr, (7 & BX_CPU_THIS_PTR alignment_check_mask));
@@ -572,7 +572,7 @@ BX_CPU_C::read_virtual_dqword_64(unsigned s, Bit64u offset, BxPackedXmmRegister
   BX_ASSERT(BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64);
   BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 16, BX_READ);
 
-  Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
+  Bit64u laddr = get_laddr64(s, offset);
   unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 15);
   Bit64u lpf = LPFOf(laddr);
   bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
@@ -604,7 +604,7 @@ BX_CPU_C::read_virtual_dqword_aligned_64(unsigned s, Bit64u offset, BxPackedXmmR
   BX_ASSERT(BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64);
   BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 16, BX_READ);
 
-  Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
+  Bit64u laddr = get_laddr64(s, offset);
   unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 0);
   Bit64u lpf = AlignedAccessLPFOf(laddr, 15);
   bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
@@ -645,7 +645,7 @@ void BX_CPU_C::read_virtual_dword_vector_64(unsigned s, Bit64u offset, unsigned
   BX_ASSERT(BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64);
   BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, len, BX_READ);
 
-  Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
+  Bit64u laddr = get_laddr64(s, offset);
   unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, len-1);
   Bit64u lpf = LPFOf(laddr);
   bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
@@ -681,7 +681,7 @@ void BX_CPU_C::read_virtual_dword_vector_aligned_64(unsigned s, Bit64u offset, u
   BX_ASSERT(BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64);
   BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, len, BX_READ);
 
-  Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
+  Bit64u laddr = get_laddr64(s, offset);
   unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 0);
   Bit64u lpf = AlignedAccessLPFOf(laddr, len-1);
   bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
@@ -728,7 +728,7 @@ BX_CPU_C::read_RMW_virtual_byte_64(unsigned s, Bit64u offset)
   Bit8u data;
   BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 1, BX_RW);
 
-  Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
+  Bit64u laddr = get_laddr64(s, offset);
   unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 0);
   Bit64u lpf = LPFOf(laddr);
   bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
@@ -765,7 +765,7 @@ BX_CPU_C::read_RMW_virtual_word_64(unsigned s, Bit64u offset)
   Bit16u data;
   BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 2, BX_RW);
 
-  Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
+  Bit64u laddr = get_laddr64(s, offset);
   unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 1);
 #if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
   Bit64u lpf = AlignedAccessLPFOf(laddr, (1 & BX_CPU_THIS_PTR alignment_check_mask));
@@ -820,7 +820,7 @@ BX_CPU_C::read_RMW_virtual_dword_64(unsigned s, Bit64u offset)
   Bit32u data;
   BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 4, BX_RW);
 
-  Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
+  Bit64u laddr = get_laddr64(s, offset);
   unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 3);
 #if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
   Bit64u lpf = AlignedAccessLPFOf(laddr, (3 & BX_CPU_THIS_PTR alignment_check_mask));
@@ -875,7 +875,7 @@ BX_CPU_C::read_RMW_virtual_qword_64(unsigned s, Bit64u offset)
   Bit64u data;
   BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 8, BX_RW);
 
-  Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
+  Bit64u laddr = get_laddr64(s, offset);
   unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 7);
 #if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
   Bit64u lpf = AlignedAccessLPFOf(laddr, (7 & BX_CPU_THIS_PTR alignment_check_mask));
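
Note: the AlignedAccessLPFOf(laddr, (N & BX_CPU_THIS_PTR alignment_check_mask)) pattern visible in many hunks above is untouched by this commit; only the laddr computation changes. The idea behind the idiom, as a hedged sketch with standard types (the exact Bochs macros may differ): when alignment checking is disabled the mask is zero and the expression reduces to the plain linear-page-frame lookup, while an armed mask makes a misaligned access fail the cached-page compare and take the slow path where #AC can be raised.

#include <cstdint>

// Sketch of the idiom, not the exact Bochs definitions.
static inline uint64_t lpf_of(uint64_t laddr)
{
  return laddr & ~UINT64_C(0xfff);               // linear page frame
}

static inline uint64_t aligned_access_lpf_of(uint64_t laddr, uint64_t align_mask)
{
  // With align_mask == 0 this equals lpf_of(laddr); with e.g. align_mask == 7,
  // any of the low three bits set makes the result differ from the cached
  // page frame, forcing the slow path that can raise the alignment fault.
  return laddr & (~UINT64_C(0xfff) | align_mask);
}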

@@ -506,7 +506,7 @@ void BX_CPU_C::prefetch(void)
 #endif
   {
     BX_CLEAR_64BIT_HIGH(BX_64BIT_REG_RIP); /* avoid 32-bit EIP wrap */
-    laddr = BX_CPU_THIS_PTR get_laddr32(BX_SEG_REG_CS, EIP);
+    laddr = get_laddr32(BX_SEG_REG_CS, EIP);
     pageOffset = PAGE_OFFSET(laddr);
 
     // Calculate RIP at the beginning of the page.
@@ -590,7 +590,7 @@ bx_bool BX_CPU_C::dbg_instruction_epilog(void)
 
   BX_CPU_THIS_PTR guard_found.cs = cs;
   BX_CPU_THIS_PTR guard_found.eip = debug_eip;
-  BX_CPU_THIS_PTR guard_found.laddr = BX_CPU_THIS_PTR get_laddr(BX_SEG_REG_CS, debug_eip);
+  BX_CPU_THIS_PTR guard_found.laddr = get_laddr(BX_SEG_REG_CS, debug_eip);
   BX_CPU_THIS_PTR guard_found.code_32_64 = BX_CPU_THIS_PTR fetchModeMask;
 
   //

@@ -43,7 +43,7 @@ void BX_CPU_C::debug_disasm_instruction(bx_address offset)
   static disassembler bx_disassemble;
   unsigned remainsInPage = 0x1000 - PAGE_OFFSET(offset);
 
-  bx_bool valid = dbg_xlate_linear2phy(BX_CPU_THIS_PTR get_laddr(BX_SEG_REG_CS, offset), &phy_addr);
+  bx_bool valid = dbg_xlate_linear2phy(get_laddr(BX_SEG_REG_CS, offset), &phy_addr);
   if (valid) {
     BX_MEM(0)->dbg_fetch_mem(BX_CPU_THIS, phy_addr, 16, instr_buf);
     unsigned isize = bx_disassemble.disasm(

@@ -47,7 +47,7 @@ Bit32u BX_CPU_C::FastRepINSW(bxInstruction_c *i, bx_address dstOff, Bit16u port,
   if ((dstOff | 0xfff) > dstSegPtr->cache.u.segment.limit_scaled)
     return 0;
 
-  bx_address laddrDst = BX_CPU_THIS_PTR get_laddr(BX_SEG_REG_ES, dstOff);
+  bx_address laddrDst = get_laddr32(BX_SEG_REG_ES, dstOff);
   // check that the address is word aligned
   if (laddrDst & 1) return 0;
 
@@ -120,7 +120,7 @@ Bit32u BX_CPU_C::FastRepOUTSW(bxInstruction_c *i, unsigned srcSeg, bx_address sr
   if ((srcOff | 0xfff) > srcSegPtr->cache.u.segment.limit_scaled)
     return 0;
 
-  bx_address laddrSrc = BX_CPU_THIS_PTR get_laddr(srcSeg, srcOff);
+  bx_address laddrSrc = get_laddr32(srcSeg, srcOff);
   // check that the address is word aligned
   if (laddrSrc & 1) return 0;
 

@@ -1435,6 +1435,8 @@ bx_phy_address BX_CPU_C::nested_walk(bx_phy_address guest_paddr, unsigned rw, bx
 {
   SVM_HOST_STATE *host_state = &BX_CPU_THIS_PTR vmcb.host_state;
 
+  BX_DEBUG(("Nested walk for guest paddr 0x" FMT_ADDRX, guest_paddr));
+
   if (host_state->efer.get_LMA())
     return nested_walk_long_mode(guest_paddr, rw, is_page_walk);
   else if (host_state->cr4.get_PAE())

@@ -260,7 +260,7 @@ BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::CLFLUSH(bxInstruction_c *i)
   bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[i->seg()];
 
   bx_address eaddr = BX_CPU_CALL_METHODR(i->ResolveModrm, (i));
-  bx_address laddr = BX_CPU_THIS_PTR get_laddr(i->seg(), eaddr);
+  bx_address laddr = get_laddr(i->seg(), eaddr);
 
 #if BX_SUPPORT_X86_64
   if (BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64) {
@@ -679,7 +679,7 @@ BX_INSF_TYPE BX_CPP_AttrRegparmN(1) BX_CPU_C::MONITOR(bxInstruction_c *i)
   bx_address offset = RAX & i->asize_mask();
 
   // set MONITOR
-  bx_address laddr = BX_CPU_THIS_PTR get_laddr(i->seg(), offset);
+  bx_address laddr = get_laddr(i->seg(), offset);
 
 #if BX_SUPPORT_X86_64
   if (BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64) {

@@ -51,7 +51,7 @@ void BX_CPP_AttrRegparmN(2) BX_CPU_C::stackPrefetch(bx_address offset, unsigned
   else
 #endif
   {
-    laddr = (Bit32u) (offset + BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.base);
+    laddr = get_laddr32(BX_SEG_REG_SS, offset);
     pageOffset = PAGE_OFFSET(laddr);
     if (pageOffset + len >= 4096) // don't care for page split accesses
       return;

@@ -50,12 +50,12 @@ Bit32u BX_CPU_C::FastRepMOVSB(bxInstruction_c *i, unsigned srcSeg, bx_address sr
   if ((dstOff | 0xfff) > dstSegPtr->cache.u.segment.limit_scaled)
     return 0;
 
-  laddrSrc = BX_CPU_THIS_PTR get_laddr(srcSeg, srcOff);
+  laddrSrc = get_laddr32(srcSeg, srcOff);
 
   hostAddrSrc = v2h_read_byte(laddrSrc, BX_CPU_THIS_PTR user_pl);
   if (! hostAddrSrc) return 0;
 
-  laddrDst = BX_CPU_THIS_PTR get_laddr(dstSeg, dstOff);
+  laddrDst = get_laddr32(dstSeg, dstOff);
 
   hostAddrDst = v2h_write_byte(laddrDst, BX_CPU_THIS_PTR user_pl);
   // Check that native host access was not vetoed for that page
@@ -120,12 +120,12 @@ Bit32u BX_CPU_C::FastRepMOVSW(bxInstruction_c *i, unsigned srcSeg, bx_address sr
   if ((dstOff | 0xfff) > dstSegPtr->cache.u.segment.limit_scaled)
     return 0;
 
-  laddrSrc = BX_CPU_THIS_PTR get_laddr(srcSeg, srcOff);
+  laddrSrc = get_laddr32(srcSeg, srcOff);
 
   hostAddrSrc = v2h_read_byte(laddrSrc, BX_CPU_THIS_PTR user_pl);
   if (! hostAddrSrc) return 0;
 
-  laddrDst = BX_CPU_THIS_PTR get_laddr(dstSeg, dstOff);
+  laddrDst = get_laddr32(dstSeg, dstOff);
 
   hostAddrDst = v2h_write_byte(laddrDst, BX_CPU_THIS_PTR user_pl);
   // Check that native host access was not vetoed for that page
@@ -193,12 +193,12 @@ Bit32u BX_CPU_C::FastRepMOVSD(bxInstruction_c *i, unsigned srcSeg, bx_address sr
   if ((dstOff | 0xfff) > dstSegPtr->cache.u.segment.limit_scaled)
     return 0;
 
-  laddrSrc = BX_CPU_THIS_PTR get_laddr(srcSeg, srcOff);
+  laddrSrc = get_laddr32(srcSeg, srcOff);
 
   hostAddrSrc = v2h_read_byte(laddrSrc, BX_CPU_THIS_PTR user_pl);
   if (! hostAddrSrc) return 0;
 
-  laddrDst = BX_CPU_THIS_PTR get_laddr(dstSeg, dstOff);
+  laddrDst = get_laddr32(dstSeg, dstOff);
 
   hostAddrDst = v2h_write_byte(laddrDst, BX_CPU_THIS_PTR user_pl);
   // Check that native host access was not vetoed for that page
@@ -260,7 +260,7 @@ Bit32u BX_CPU_C::FastRepSTOSB(bxInstruction_c *i, unsigned dstSeg, bx_address ds
   if ((dstOff | 0xfff) > dstSegPtr->cache.u.segment.limit_scaled)
     return 0;
 
-  laddrDst = BX_CPU_THIS_PTR get_laddr(dstSeg, dstOff);
+  laddrDst = get_laddr32(dstSeg, dstOff);
 
   hostAddrDst = v2h_write_byte(laddrDst, BX_CPU_THIS_PTR user_pl);
   // Check that native host access was not vetoed for that page
@@ -314,7 +314,7 @@ Bit32u BX_CPU_C::FastRepSTOSW(bxInstruction_c *i, unsigned dstSeg, bx_address ds
   if ((dstOff | 0xfff) > dstSegPtr->cache.u.segment.limit_scaled)
     return 0;
 
-  laddrDst = BX_CPU_THIS_PTR get_laddr(dstSeg, dstOff);
+  laddrDst = get_laddr32(dstSeg, dstOff);
 
   hostAddrDst = v2h_write_byte(laddrDst, BX_CPU_THIS_PTR user_pl);
   // Check that native host access was not vetoed for that page
@@ -370,7 +370,7 @@ Bit32u BX_CPU_C::FastRepSTOSD(bxInstruction_c *i, unsigned dstSeg, bx_address ds
   if ((dstOff | 0xfff) > dstSegPtr->cache.u.segment.limit_scaled)
     return 0;
 
-  laddrDst = BX_CPU_THIS_PTR get_laddr(dstSeg, dstOff);
+  laddrDst = get_laddr32(dstSeg, dstOff);
 
   hostAddrDst = v2h_write_byte(laddrDst, BX_CPU_THIS_PTR user_pl);
   // Check that native host access was not vetoed for that page

@@ -454,9 +454,9 @@ void BX_CPP_AttrRegparmN(3) BX_CPU_C::VMexit_IO(bxInstruction_c *i, unsigned por
     bx_address asize_mask = (bx_address) i->asize_mask(), laddr;
 
     if (qualification & VMX_VMEXIT_IO_PORTIN)
-      laddr = BX_CPU_THIS_PTR get_laddr(BX_SEG_REG_ES, RDI & asize_mask);
+      laddr = get_laddr(BX_SEG_REG_ES, RDI & asize_mask);
     else // PORTOUT
-      laddr = BX_CPU_THIS_PTR get_laddr(i->seg(), RSI & asize_mask);
+      laddr = get_laddr(i->seg(), RSI & asize_mask);
 
     VMwrite_natural(VMCS_GUEST_LINEAR_ADDR, laddr);
 
@@ -530,7 +530,7 @@ Bit32u BX_CPP_AttrRegparmN(2) BX_CPU_C::VMexit_LMSW(bxInstruction_c *i, Bit32u m
   qualification |= msw << 16;
   if (! i->modC0()) {
     qualification |= (1 << 6); // memory operand
-    VMwrite_natural(VMCS_GUEST_LINEAR_ADDR, BX_CPU_THIS_PTR get_laddr(i->seg(), RMAddr(i)));
+    VMwrite_natural(VMCS_GUEST_LINEAR_ADDR, get_laddr(i->seg(), RMAddr(i)));
   }
 
   VMexit(i, VMX_VMEXIT_CR_ACCESS, qualification);
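
Note: for the VMexit_LMSW hunk above, a hedged sketch of how the exit-qualification bits shown in the diff fit together (only the fields visible in the hunk; the CR number and access-type bits are set earlier in the function and are omitted here):

#include <cstdint>

// Illustrative only: packs the LMSW source data into bits 31:16 and flags a
// memory operand in bit 6, matching the two statements in the hunk above.
static inline uint64_t lmsw_qualification_bits(uint32_t msw, bool memory_operand)
{
  uint64_t q = 0;
  q |= static_cast<uint64_t>(msw) << 16;   // qualification |= msw << 16;
  if (memory_operand)
    q |= 1u << 6;                          // qualification |= (1 << 6);
  return q;
}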

@@ -25,9 +25,11 @@
 #include "bochs.h"
 #include "cpu/cpu.h"
 
-struct bx_instr_ia_stats {
+#define BX_IA_STATS_ENTRIES (BX_IA_LAST*2) /* /r and /m form */
+
+static struct bx_instr_ia_stats {
   bx_bool active;
-  Bit32u ia_cnt[BX_IA_LAST];
+  Bit32u ia_cnt[BX_IA_STATS_ENTRIES];
   Bit32u total_cnt;
   Bit32u interrupts;
   Bit32u exceptions;
@@ -53,7 +55,7 @@ void bx_instr_reset(unsigned cpu, unsigned type)
   ia_stats[cpu].active = 1;
   ia_stats[cpu].total_cnt = 0;
 
-  for(int n=0; n < BX_IA_LAST; n++)
+  for(int n=0; n < BX_IA_STATS_ENTRIES; n++)
     ia_stats[cpu].ia_cnt[n] = 0;
 
   ia_stats[cpu].interrupts = ia_stats[cpu].exceptions = 0;
@@ -79,18 +81,24 @@ void bx_instr_hwinterrupt(unsigned cpu, unsigned vector, Bit16u cs, bx_address e
 void bx_instr_before_execution(unsigned cpu, bxInstruction_c *i)
 {
   if(ia_stats[cpu].active) {
-    ia_stats[cpu].ia_cnt[i->getIaOpcode()]++;
+    ia_stats[cpu].ia_cnt[i->getIaOpcode() * 2 + !!i->modC0()]++;
     ia_stats[cpu].total_cnt++;
 
     if (ia_stats[cpu].total_cnt > IA_CNT_DUMP_THRESHOLD) {
       printf("Dump IA stats for CPU %d\n", cpu);
       printf("----------------------------------------------------------\n");
       printf("Interrupts: %d, Exceptions: %d\n", ia_stats[cpu].interrupts, ia_stats[cpu].exceptions);
-      for (int n=0;n < BX_IA_LAST; n++) {
-        if (ia_stats[cpu].ia_cnt[n] > 0) {
-          printf("%s: %f%%\n", get_bx_opcode_name(n), ia_stats[cpu].ia_cnt[n] * 100.0 / ia_stats[cpu].total_cnt);
-          ia_stats[cpu].ia_cnt[n] = 0;
+      while(1) {
+        Bit32u max = 0, max_index = 0;
+        for (int n=0;n < BX_IA_STATS_ENTRIES; n++) {
+          if (ia_stats[cpu].ia_cnt[n] > max) {
+            max = ia_stats[cpu].ia_cnt[n];
+            max_index = n;
+          }
         }
+        if (max == 0) break;
+        printf("%s /%c: %f%%\n", get_bx_opcode_name(max_index/2), (max_index & 1) ? 'm' : 'r', ia_stats[cpu].ia_cnt[max_index] * 100.0f / ia_stats[cpu].total_cnt);
+        ia_stats[cpu].ia_cnt[max_index] = 0;
       }
       ia_stats[cpu].interrupts = ia_stats[cpu].exceptions = ia_stats[cpu].total_cnt = 0;
     }
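
Note: with BX_IA_STATS_ENTRIES doubled, the example keeps two counters per opcode, selected by !!i->modC0(); the dump labels an odd slot '/m' and an even slot '/r', and prints counters in descending order by repeatedly picking the largest remaining entry and zeroing it. A small hedged sketch of the index packing and unpacking the example relies on (helper names are illustrative, not part of the commit):

#include <cassert>

// Assumed layout, mirroring the diff: index = opcode*2 + form_bit, where the
// form bit comes from modC0(); the dump prints '/m' when the bit is set and
// '/r' otherwise.
static inline unsigned ia_stats_index(unsigned opcode, bool mod_c0)
{
  return opcode * 2 + (mod_c0 ? 1 : 0);
}

static inline void ia_stats_decode(unsigned index, unsigned *opcode, char *form)
{
  *opcode = index / 2;
  *form = (index & 1) ? 'm' : 'r';
}

// Example: both forms of the same opcode land in adjacent slots.
static void ia_stats_index_selftest()
{
  unsigned op; char form;
  ia_stats_decode(ia_stats_index(42, false), &op, &form);
  assert(op == 42 && form == 'r');
  ia_stats_decode(ia_stats_index(42, true), &op, &form);
  assert(op == 42 && form == 'm');
}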