Fixed compilation error

parent 1da5943f1a
commit e5cb22772d
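Every hunk below applies the same fix in both access32.cc and access64.cc: the old code applied BX_CPU_THIS_PTR alignment_check_mask to the aligned linear page frame unconditionally, while the new code computes the LPF first and applies the mask only under #if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4. The compilation error was presumably that alignment_check_mask is only declared when that configuration is enabled, so builds without alignment-check support failed to compile. Below is a minimal, standalone C++ sketch of the pattern using hypothetical names (Cpu, SUPPORT_ALIGNMENT_CHECK, aligned_lpf, lookup_key), not the real Bochs definitions:

// Hypothetical illustration of the fix pattern; not Bochs code.
#include <cstdint>

#define SUPPORT_ALIGNMENT_CHECK 1   // flip to 0 to emulate a build without the feature:
                                    // the code still compiles because every use of the
                                    // optional field sits behind the same guard

struct Cpu {
#if SUPPORT_ALIGNMENT_CHECK
  uint64_t alignment_check_mask;    // exists only when the feature is compiled in
#endif
};

// Crude stand-in for AlignedAccessLPFOf(): just the 4K page frame of the address.
static uint64_t aligned_lpf(uint64_t laddr) {
  return laddr & ~uint64_t(0xfff);
}

uint64_t lookup_key([[maybe_unused]] const Cpu& cpu, uint64_t laddr) {
  uint64_t lpf = aligned_lpf(laddr);   // compute the TLB lookup key unconditionally...
#if SUPPORT_ALIGNMENT_CHECK
  lpf &= cpu.alignment_check_mask;     // ...and touch the optional field only under the guard
#endif
  return lpf;
}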
access32.cc

@@ -1,5 +1,5 @@
 /////////////////////////////////////////////////////////////////////////
-// $Id: access32.cc,v 1.8 2008-08-07 22:14:38 sshwarts Exp $
+// $Id: access32.cc,v 1.9 2008-08-10 19:40:47 sshwarts Exp $
 /////////////////////////////////////////////////////////////////////////
 //
 // Copyright (c) 2008 Stanislav Shwartsman
@@ -88,7 +88,10 @@ accessOK:
   laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
 #if BX_SupportGuest2HostTLB
   unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 1);
-  Bit32u lpf = AlignedAccessLPFOf(laddr, 1) & (BX_CPU_THIS_PTR alignment_check_mask);
+  Bit32u lpf = AlignedAccessLPFOf(laddr, 1);
+#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
+  lpf &= (BX_CPU_THIS_PTR alignment_check_mask);
+#endif
   bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
   if (tlbEntry->lpf == lpf) {
     // See if the TLB entry privilege level allows us write access
@@ -144,7 +147,10 @@ accessOK:
   laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
 #if BX_SupportGuest2HostTLB
   unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 3);
-  Bit32u lpf = AlignedAccessLPFOf(laddr, 3) & (BX_CPU_THIS_PTR alignment_check_mask);
+  Bit32u lpf = AlignedAccessLPFOf(laddr, 3);
+#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
+  lpf &= (BX_CPU_THIS_PTR alignment_check_mask);
+#endif
   bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
   if (tlbEntry->lpf == lpf) {
     // See if the TLB entry privilege level allows us write access
@@ -200,7 +206,10 @@ accessOK:
   laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
 #if BX_SupportGuest2HostTLB
   unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 7);
-  Bit32u lpf = AlignedAccessLPFOf(laddr, 7) & (BX_CPU_THIS_PTR alignment_check_mask);
+  Bit32u lpf = AlignedAccessLPFOf(laddr, 7);
+#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
+  lpf &= (BX_CPU_THIS_PTR alignment_check_mask);
+#endif
   bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
   if (tlbEntry->lpf == lpf) {
     // See if the TLB entry privilege level allows us write access
@@ -256,7 +265,10 @@ accessOK:
   laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
 #if BX_SupportGuest2HostTLB
   unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 15);
-  Bit32u lpf = AlignedAccessLPFOf(laddr, 15) & (BX_CPU_THIS_PTR alignment_check_mask);
+  Bit32u lpf = AlignedAccessLPFOf(laddr, 15);
+#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
+  lpf &= (BX_CPU_THIS_PTR alignment_check_mask);
+#endif
   bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
   if (tlbEntry->lpf == lpf) {
     // See if the TLB entry privilege level allows us write access
@@ -413,7 +425,10 @@ accessOK:
   laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
 #if BX_SupportGuest2HostTLB
   unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 1);
-  Bit32u lpf = AlignedAccessLPFOf(laddr, 1) & (BX_CPU_THIS_PTR alignment_check_mask);
+  Bit32u lpf = AlignedAccessLPFOf(laddr, 1);
+#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
+  lpf &= (BX_CPU_THIS_PTR alignment_check_mask);
+#endif
   bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
   if (tlbEntry->lpf == lpf) {
     // See if the TLB entry privilege level allows us read access
@@ -467,7 +482,10 @@ accessOK:
   laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
 #if BX_SupportGuest2HostTLB
   unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 3);
-  Bit32u lpf = AlignedAccessLPFOf(laddr, 3) & (BX_CPU_THIS_PTR alignment_check_mask);
+  Bit32u lpf = AlignedAccessLPFOf(laddr, 3);
+#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
+  lpf &= (BX_CPU_THIS_PTR alignment_check_mask);
+#endif
   bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
   if (tlbEntry->lpf == lpf) {
     // See if the TLB entry privilege level allows us read access
@@ -521,7 +539,10 @@ accessOK:
   laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
 #if BX_SupportGuest2HostTLB
   unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 7);
-  Bit32u lpf = AlignedAccessLPFOf(laddr, 7) & (BX_CPU_THIS_PTR alignment_check_mask);
+  Bit32u lpf = AlignedAccessLPFOf(laddr, 7);
+#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
+  lpf &= (BX_CPU_THIS_PTR alignment_check_mask);
+#endif
   bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
   if (tlbEntry->lpf == lpf) {
     // See if the TLB entry privilege level allows us read access
@@ -574,7 +595,10 @@ accessOK:
   laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
 #if BX_SupportGuest2HostTLB
   unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 15);
-  Bit32u lpf = AlignedAccessLPFOf(laddr, 15) & (BX_CPU_THIS_PTR alignment_check_mask);
+  Bit32u lpf = AlignedAccessLPFOf(laddr, 15);
+#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
+  lpf &= (BX_CPU_THIS_PTR alignment_check_mask);
+#endif
   bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
   if (tlbEntry->lpf == lpf) {
     // See if the TLB entry privilege level allows us read access
@@ -734,7 +758,10 @@ accessOK:
   laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
 #if BX_SupportGuest2HostTLB
   unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 1);
-  Bit32u lpf = AlignedAccessLPFOf(laddr, 1) & (BX_CPU_THIS_PTR alignment_check_mask);
+  Bit32u lpf = AlignedAccessLPFOf(laddr, 1);
+#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
+  lpf &= (BX_CPU_THIS_PTR alignment_check_mask);
+#endif
   bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
   if (tlbEntry->lpf == lpf) {
     // See if the TLB entry privilege level allows us write access
@@ -792,7 +819,10 @@ accessOK:
   laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
 #if BX_SupportGuest2HostTLB
   unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 3);
-  Bit32u lpf = AlignedAccessLPFOf(laddr, 3) & (BX_CPU_THIS_PTR alignment_check_mask);
+  Bit32u lpf = AlignedAccessLPFOf(laddr, 3);
+#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
+  lpf &= (BX_CPU_THIS_PTR alignment_check_mask);
+#endif
   bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
   if (tlbEntry->lpf == lpf) {
     // See if the TLB entry privilege level allows us write access
@@ -850,7 +880,10 @@ accessOK:
   laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
 #if BX_SupportGuest2HostTLB
   unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 7);
-  Bit32u lpf = AlignedAccessLPFOf(laddr, 7) & (BX_CPU_THIS_PTR alignment_check_mask);
+  Bit32u lpf = AlignedAccessLPFOf(laddr, 7);
+#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
+  lpf &= (BX_CPU_THIS_PTR alignment_check_mask);
+#endif
   bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
   if (tlbEntry->lpf == lpf) {
     // See if the TLB entry privilege level allows us write access
@@ -1074,7 +1107,10 @@ accessOK:
   bx_bool user = (curr_pl == 3);
 #if BX_SupportGuest2HostTLB
   unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 1);
-  Bit32u lpf = AlignedAccessLPFOf(laddr, 1) & (BX_CPU_THIS_PTR alignment_check_mask);
+  Bit32u lpf = AlignedAccessLPFOf(laddr, 1);
+#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
+  lpf &= (BX_CPU_THIS_PTR alignment_check_mask);
+#endif
   bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
   if (tlbEntry->lpf == lpf) {
     // See if the TLB entry privilege level allows us write access
@@ -1130,7 +1166,10 @@ accessOK:
   bx_bool user = (curr_pl == 3);
 #if BX_SupportGuest2HostTLB
   unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 3);
-  Bit32u lpf = AlignedAccessLPFOf(laddr, 3) & (BX_CPU_THIS_PTR alignment_check_mask);
+  Bit32u lpf = AlignedAccessLPFOf(laddr, 3);
+#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
+  lpf &= (BX_CPU_THIS_PTR alignment_check_mask);
+#endif
   bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
   if (tlbEntry->lpf == lpf) {
     // See if the TLB entry privilege level allows us write access
access64.cc

@@ -1,5 +1,5 @@
 /////////////////////////////////////////////////////////////////////////
-// $Id: access64.cc,v 1.13 2008-08-07 22:14:38 sshwarts Exp $
+// $Id: access64.cc,v 1.14 2008-08-10 19:40:47 sshwarts Exp $
 /////////////////////////////////////////////////////////////////////////
 //
 // Copyright (c) 2008 Stanislav Shwartsman
@@ -77,7 +77,10 @@ BX_CPU_C::write_virtual_word_64(unsigned s, Bit64u offset, Bit16u data)
   Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
 #if BX_SupportGuest2HostTLB
   unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 1);
-  Bit64u lpf = AlignedAccessLPFOf(laddr, 1) & (BX_CPU_THIS_PTR alignment_check_mask);
+  Bit64u lpf = AlignedAccessLPFOf(laddr, 1);
+#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
+  lpf &= (BX_CPU_THIS_PTR alignment_check_mask);
+#endif
   bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
   if (tlbEntry->lpf == lpf) {
     // See if the TLB entry privilege level allows us write access
@@ -125,7 +128,10 @@ BX_CPU_C::write_virtual_dword_64(unsigned s, Bit64u offset, Bit32u data)
   Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
 #if BX_SupportGuest2HostTLB
   unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 3);
-  Bit64u lpf = AlignedAccessLPFOf(laddr, 3) & (BX_CPU_THIS_PTR alignment_check_mask);
+  Bit64u lpf = AlignedAccessLPFOf(laddr, 3);
+#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
+  lpf &= (BX_CPU_THIS_PTR alignment_check_mask);
+#endif
   bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
   if (tlbEntry->lpf == lpf) {
     // See if the TLB entry privilege level allows us write access
@@ -173,7 +179,10 @@ BX_CPU_C::write_virtual_qword_64(unsigned s, Bit64u offset, Bit64u data)
   Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
 #if BX_SupportGuest2HostTLB
   unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 7);
-  Bit64u lpf = AlignedAccessLPFOf(laddr, 7) & (BX_CPU_THIS_PTR alignment_check_mask);
+  Bit64u lpf = AlignedAccessLPFOf(laddr, 7);
+#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
+  lpf &= (BX_CPU_THIS_PTR alignment_check_mask);
+#endif
   bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
   if (tlbEntry->lpf == lpf) {
     // See if the TLB entry privilege level allows us write access
@@ -221,7 +230,10 @@ BX_CPU_C::write_virtual_dqword_64(unsigned s, Bit64u offset, const BxPackedXmmRe
   Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
 #if BX_SupportGuest2HostTLB
   unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 15);
-  Bit64u lpf = AlignedAccessLPFOf(laddr, 15) & (BX_CPU_THIS_PTR alignment_check_mask);
+  Bit64u lpf = AlignedAccessLPFOf(laddr, 15);
+#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
+  lpf &= (BX_CPU_THIS_PTR alignment_check_mask);
+#endif
   bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
   if (tlbEntry->lpf == lpf) {
     // See if the TLB entry privilege level allows us write access
@@ -352,7 +364,10 @@ BX_CPU_C::read_virtual_word_64(unsigned s, Bit64u offset)
   Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
 #if BX_SupportGuest2HostTLB
   unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 1);
-  Bit64u lpf = AlignedAccessLPFOf(laddr, 1) & (BX_CPU_THIS_PTR alignment_check_mask);
+  Bit64u lpf = AlignedAccessLPFOf(laddr, 1);
+#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
+  lpf &= (BX_CPU_THIS_PTR alignment_check_mask);
+#endif
   bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
   if (tlbEntry->lpf == lpf) {
     // See if the TLB entry privilege level allows us read access
@@ -398,7 +413,10 @@ BX_CPU_C::read_virtual_dword_64(unsigned s, Bit64u offset)
   Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
 #if BX_SupportGuest2HostTLB
   unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 3);
-  Bit64u lpf = AlignedAccessLPFOf(laddr, 3) & (BX_CPU_THIS_PTR alignment_check_mask);
+  Bit64u lpf = AlignedAccessLPFOf(laddr, 3);
+#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
+  lpf &= (BX_CPU_THIS_PTR alignment_check_mask);
+#endif
   bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
   if (tlbEntry->lpf == lpf) {
     // See if the TLB entry privilege level allows us read access
@@ -444,7 +462,10 @@ BX_CPU_C::read_virtual_qword_64(unsigned s, Bit64u offset)
   Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
 #if BX_SupportGuest2HostTLB
   unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 7);
-  Bit64u lpf = AlignedAccessLPFOf(laddr, 7) & (BX_CPU_THIS_PTR alignment_check_mask);
+  Bit64u lpf = AlignedAccessLPFOf(laddr, 7);
+#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
+  lpf &= (BX_CPU_THIS_PTR alignment_check_mask);
+#endif
   bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
   if (tlbEntry->lpf == lpf) {
     // See if the TLB entry privilege level allows us read access
@@ -488,7 +509,10 @@ BX_CPU_C::read_virtual_dqword_64(unsigned s, Bit64u offset, BxPackedXmmRegister
   Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
 #if BX_SupportGuest2HostTLB
   unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 15);
-  Bit64u lpf = AlignedAccessLPFOf(laddr, 15) & (BX_CPU_THIS_PTR alignment_check_mask);
+  Bit64u lpf = AlignedAccessLPFOf(laddr, 15);
+#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
+  lpf &= (BX_CPU_THIS_PTR alignment_check_mask);
+#endif
   bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
   if (tlbEntry->lpf == lpf) {
     // See if the TLB entry privilege level allows us read access
@@ -620,7 +644,10 @@ BX_CPU_C::read_RMW_virtual_word_64(unsigned s, Bit64u offset)
   Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
 #if BX_SupportGuest2HostTLB
   unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 1);
-  Bit64u lpf = AlignedAccessLPFOf(laddr, 1) & (BX_CPU_THIS_PTR alignment_check_mask);
+  Bit64u lpf = AlignedAccessLPFOf(laddr, 1);
+#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
+  lpf &= (BX_CPU_THIS_PTR alignment_check_mask);
+#endif
   bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
   if (tlbEntry->lpf == lpf) {
     // See if the TLB entry privilege level allows us write access
@@ -670,7 +697,10 @@ BX_CPU_C::read_RMW_virtual_dword_64(unsigned s, Bit64u offset)
   Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
 #if BX_SupportGuest2HostTLB
   unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 3);
-  Bit64u lpf = AlignedAccessLPFOf(laddr, 3) & (BX_CPU_THIS_PTR alignment_check_mask);
+  Bit64u lpf = AlignedAccessLPFOf(laddr, 3);
+#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
+  lpf &= (BX_CPU_THIS_PTR alignment_check_mask);
+#endif
   bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
   if (tlbEntry->lpf == lpf) {
     // See if the TLB entry privilege level allows us write access
@@ -720,7 +750,10 @@ BX_CPU_C::read_RMW_virtual_qword_64(unsigned s, Bit64u offset)
   Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
 #if BX_SupportGuest2HostTLB
   unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 7);
-  Bit64u lpf = AlignedAccessLPFOf(laddr, 7) & (BX_CPU_THIS_PTR alignment_check_mask);
+  Bit64u lpf = AlignedAccessLPFOf(laddr, 7);
+#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
+  lpf &= (BX_CPU_THIS_PTR alignment_check_mask);
+#endif
   bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
   if (tlbEntry->lpf == lpf) {
     // See if the TLB entry privilege level allows us write access
@@ -765,7 +798,10 @@ void BX_CPU_C::write_new_stack_qword_64(Bit64u laddr, unsigned curr_pl, Bit64u d
 #if BX_SupportGuest2HostTLB
   bx_bool user = (curr_pl == 3);
   unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 7);
-  Bit64u lpf = AlignedAccessLPFOf(laddr, 7) & (BX_CPU_THIS_PTR alignment_check_mask);
+  Bit64u lpf = AlignedAccessLPFOf(laddr, 7);
+#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
+  lpf &= (BX_CPU_THIS_PTR alignment_check_mask);
+#endif
   bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
   if (tlbEntry->lpf == lpf) {
     // See if the TLB entry privilege level allows us write access