split Bochs CPU TLB into separate DTLB and ITLB to avoid aliasing conflicts between data and instruction translations. ~5% speedup measured

Stanislav Shwartsman 2019-12-09 18:37:02 +00:00
parent 311ef81e87
commit 4b66fecaad
10 changed files with 155 additions and 98 deletions
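In short, the single unified TLB member of BX_CPU_C becomes two independent direct-mapped caches, one for data accesses and one for instruction fetches, so a hot data page can no longer evict the translation of the currently executing code page (and vice versa). A minimal sketch of the idea, using simplified, hypothetical types rather than the real templated TLB<> class from cpu.h:

```cpp
// Hypothetical, simplified illustration of the DTLB/ITLB split.
// Sizes follow BX_DTLB_SIZE / BX_ITLB_SIZE from this commit (2048 entries each);
// the real entries are bx_TLB_entry objects with more fields.
#include <cstdint>

struct TlbEntry {
    uint64_t lpf = ~0ull;    // linear page frame cached by this slot (invalid by default)
    uint64_t ppf = 0;        // corresponding physical page frame
    uint32_t accessBits = 0; // permission bits; encoding differs for DTLB vs ITLB
};

struct SplitTlb {
    static const unsigned kSize = 2048;
    TlbEntry dtlb[kSize];    // used only by data reads/writes
    TlbEntry itlb[kSize];    // used only by instruction fetch (prefetch)

    // Direct-mapped lookup: code and data index separate arrays, so the two
    // access streams no longer alias into the same slots.
    TlbEntry* dtlb_entry_of(uint64_t laddr) { return &dtlb[(laddr >> 12) % kSize]; }
    TlbEntry* itlb_entry_of(uint64_t laddr) { return &itlb[(laddr >> 12) % kSize]; }
};
```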

View File

@ -1481,9 +1481,14 @@ void bx_dbg_xlate_address(bx_lin_address laddr)
void bx_dbg_tlb_lookup(bx_lin_address laddr)
{
Bit32u index = BX_TLB_INDEX_OF(laddr, 0);
char cpu_param_name[16];
sprintf(cpu_param_name, "TLB.entry%d", index);
Bit32u index = BX_DTLB_INDEX_OF(laddr, 0);
sprintf(cpu_param_name, "DTLB.entry%d", index);
bx_dbg_show_param_command(cpu_param_name, 0);
index = BX_ITLB_INDEX_OF(laddr, 0);
sprintf(cpu_param_name, "ITLB.entry%d", index);
bx_dbg_show_param_command(cpu_param_name, 0);
}

View File

@ -297,7 +297,7 @@ BX_CPU_C::system_read_byte(bx_address laddr)
Bit8u data;
bx_address lpf = LPFOf(laddr);
bx_TLB_entry *tlbEntry = BX_TLB_ENTRY_OF(laddr, 0);
bx_TLB_entry *tlbEntry = BX_DTLB_ENTRY_OF(laddr, 0);
if (tlbEntry->lpf == lpf) {
// See if the TLB entry privilege level allows us read access
// from this CPL.
@ -323,7 +323,7 @@ BX_CPU_C::system_read_word(bx_address laddr)
Bit16u data;
bx_address lpf = LPFOf(laddr);
bx_TLB_entry *tlbEntry = BX_TLB_ENTRY_OF(laddr, 1);
bx_TLB_entry *tlbEntry = BX_DTLB_ENTRY_OF(laddr, 1);
if (tlbEntry->lpf == lpf) {
// See if the TLB entry privilege level allows us read access
// from this CPL.
@ -349,7 +349,7 @@ BX_CPU_C::system_read_dword(bx_address laddr)
Bit32u data;
bx_address lpf = LPFOf(laddr);
bx_TLB_entry *tlbEntry = BX_TLB_ENTRY_OF(laddr, 3);
bx_TLB_entry *tlbEntry = BX_DTLB_ENTRY_OF(laddr, 3);
if (tlbEntry->lpf == lpf) {
// See if the TLB entry privilege level allows us read access
// from this CPL.
@ -375,7 +375,7 @@ BX_CPU_C::system_read_qword(bx_address laddr)
Bit64u data;
bx_address lpf = LPFOf(laddr);
bx_TLB_entry *tlbEntry = BX_TLB_ENTRY_OF(laddr, 7);
bx_TLB_entry *tlbEntry = BX_DTLB_ENTRY_OF(laddr, 7);
if (tlbEntry->lpf == lpf) {
// See if the TLB entry privilege level allows us read access
// from this CPL.
@ -399,7 +399,7 @@ BX_CPU_C::system_read_qword(bx_address laddr)
BX_CPU_C::system_write_byte(bx_address laddr, Bit8u data)
{
bx_address lpf = LPFOf(laddr);
bx_TLB_entry *tlbEntry = BX_TLB_ENTRY_OF(laddr, 0);
bx_TLB_entry *tlbEntry = BX_DTLB_ENTRY_OF(laddr, 0);
if (tlbEntry->lpf == lpf) {
// See if the TLB entry privilege level allows us write access
// from this CPL.
@ -423,7 +423,7 @@ BX_CPU_C::system_write_byte(bx_address laddr, Bit8u data)
BX_CPU_C::system_write_word(bx_address laddr, Bit16u data)
{
bx_address lpf = LPFOf(laddr);
bx_TLB_entry *tlbEntry = BX_TLB_ENTRY_OF(laddr, 1);
bx_TLB_entry *tlbEntry = BX_DTLB_ENTRY_OF(laddr, 1);
if (tlbEntry->lpf == lpf) {
// See if the TLB entry privilege level allows us write access
// from this CPL.
@ -447,7 +447,7 @@ BX_CPU_C::system_write_word(bx_address laddr, Bit16u data)
BX_CPU_C::system_write_dword(bx_address laddr, Bit32u data)
{
bx_address lpf = LPFOf(laddr);
bx_TLB_entry *tlbEntry = BX_TLB_ENTRY_OF(laddr, 3);
bx_TLB_entry *tlbEntry = BX_DTLB_ENTRY_OF(laddr, 3);
if (tlbEntry->lpf == lpf) {
// See if the TLB entry privilege level allows us write access
// from this CPL.
@ -471,7 +471,7 @@ BX_CPU_C::system_write_dword(bx_address laddr, Bit32u data)
BX_CPU_C::v2h_read_byte(bx_address laddr, bx_bool user)
{
bx_address lpf = LPFOf(laddr);
bx_TLB_entry *tlbEntry = BX_TLB_ENTRY_OF(laddr, 0);
bx_TLB_entry *tlbEntry = BX_DTLB_ENTRY_OF(laddr, 0);
if (tlbEntry->lpf == lpf) {
// See if the TLB entry privilege level allows us read access
// from this CPL.
@ -490,7 +490,7 @@ BX_CPU_C::v2h_read_byte(bx_address laddr, bx_bool user)
BX_CPU_C::v2h_write_byte(bx_address laddr, bx_bool user)
{
bx_address lpf = LPFOf(laddr);
bx_TLB_entry *tlbEntry = BX_TLB_ENTRY_OF(laddr, 0);
bx_TLB_entry *tlbEntry = BX_DTLB_ENTRY_OF(laddr, 0);
if (tlbEntry->lpf == lpf)
{
// See if the TLB entry privilege level allows us write access

View File

@ -30,7 +30,7 @@
BX_CPU_C::write_linear_byte(unsigned s, bx_address laddr, Bit8u data)
{
bx_address lpf = LPFOf(laddr);
bx_TLB_entry *tlbEntry = BX_TLB_ENTRY_OF(laddr, 0);
bx_TLB_entry *tlbEntry = BX_DTLB_ENTRY_OF(laddr, 0);
if (tlbEntry->lpf == lpf) {
// See if the TLB entry privilege level allows us write access
// from this CPL.
@ -53,7 +53,7 @@ BX_CPU_C::write_linear_byte(unsigned s, bx_address laddr, Bit8u data)
void BX_CPP_AttrRegparmN(3)
BX_CPU_C::write_linear_word(unsigned s, bx_address laddr, Bit16u data)
{
bx_TLB_entry *tlbEntry = BX_TLB_ENTRY_OF(laddr, 1);
bx_TLB_entry *tlbEntry = BX_DTLB_ENTRY_OF(laddr, 1);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
bx_address lpf = AlignedAccessLPFOf(laddr, (1 & BX_CPU_THIS_PTR alignment_check_mask));
#else
@ -81,7 +81,7 @@ BX_CPU_C::write_linear_word(unsigned s, bx_address laddr, Bit16u data)
void BX_CPP_AttrRegparmN(3)
BX_CPU_C::write_linear_dword(unsigned s, bx_address laddr, Bit32u data)
{
bx_TLB_entry *tlbEntry = BX_TLB_ENTRY_OF(laddr, 3);
bx_TLB_entry *tlbEntry = BX_DTLB_ENTRY_OF(laddr, 3);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
bx_address lpf = AlignedAccessLPFOf(laddr, (3 & BX_CPU_THIS_PTR alignment_check_mask));
#else
@ -109,7 +109,7 @@ BX_CPU_C::write_linear_dword(unsigned s, bx_address laddr, Bit32u data)
void BX_CPP_AttrRegparmN(3)
BX_CPU_C::write_linear_qword(unsigned s, bx_address laddr, Bit64u data)
{
bx_TLB_entry *tlbEntry = BX_TLB_ENTRY_OF(laddr, 7);
bx_TLB_entry *tlbEntry = BX_DTLB_ENTRY_OF(laddr, 7);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
bx_address lpf = AlignedAccessLPFOf(laddr, (7 & BX_CPU_THIS_PTR alignment_check_mask));
#else
@ -140,7 +140,7 @@ BX_CPU_C::write_linear_qword(unsigned s, bx_address laddr, Bit64u data)
BX_CPU_C::write_linear_xmmword(unsigned s, bx_address laddr, const BxPackedXmmRegister *data)
{
bx_address lpf = LPFOf(laddr);
bx_TLB_entry *tlbEntry = BX_TLB_ENTRY_OF(laddr, 15);
bx_TLB_entry *tlbEntry = BX_DTLB_ENTRY_OF(laddr, 15);
if (tlbEntry->lpf == lpf) {
// See if the TLB entry privilege level allows us write access
// from this CPL.
@ -165,7 +165,7 @@ BX_CPU_C::write_linear_xmmword(unsigned s, bx_address laddr, const BxPackedXmmRe
BX_CPU_C::write_linear_xmmword_aligned(unsigned s, bx_address laddr, const BxPackedXmmRegister *data)
{
bx_address lpf = AlignedAccessLPFOf(laddr, 15);
bx_TLB_entry *tlbEntry = BX_TLB_ENTRY_OF(laddr, 0);
bx_TLB_entry *tlbEntry = BX_DTLB_ENTRY_OF(laddr, 0);
if (tlbEntry->lpf == lpf) {
// See if the TLB entry privilege level allows us write access
// from this CPL.
@ -195,7 +195,7 @@ BX_CPU_C::write_linear_xmmword_aligned(unsigned s, bx_address laddr, const BxPac
BX_CPU_C::write_linear_ymmword(unsigned s, bx_address laddr, const BxPackedYmmRegister *data)
{
bx_address lpf = LPFOf(laddr);
bx_TLB_entry *tlbEntry = BX_TLB_ENTRY_OF(laddr, 31);
bx_TLB_entry *tlbEntry = BX_DTLB_ENTRY_OF(laddr, 31);
if (tlbEntry->lpf == lpf) {
// See if the TLB entry privilege level allows us write access
// from this CPL.
@ -221,7 +221,7 @@ BX_CPU_C::write_linear_ymmword(unsigned s, bx_address laddr, const BxPackedYmmRe
BX_CPU_C::write_linear_ymmword_aligned(unsigned s, bx_address laddr, const BxPackedYmmRegister *data)
{
bx_address lpf = AlignedAccessLPFOf(laddr, 31);
bx_TLB_entry *tlbEntry = BX_TLB_ENTRY_OF(laddr, 0);
bx_TLB_entry *tlbEntry = BX_DTLB_ENTRY_OF(laddr, 0);
if (tlbEntry->lpf == lpf) {
// See if the TLB entry privilege level allows us write access
// from this CPL.
@ -252,7 +252,7 @@ BX_CPU_C::write_linear_ymmword_aligned(unsigned s, bx_address laddr, const BxPac
BX_CPU_C::write_linear_zmmword(unsigned s, bx_address laddr, const BxPackedZmmRegister *data)
{
bx_address lpf = LPFOf(laddr);
bx_TLB_entry *tlbEntry = BX_TLB_ENTRY_OF(laddr, 63);
bx_TLB_entry *tlbEntry = BX_DTLB_ENTRY_OF(laddr, 63);
if (tlbEntry->lpf == lpf) {
// See if the TLB entry privilege level allows us write access
// from this CPL.
@ -278,7 +278,7 @@ BX_CPU_C::write_linear_zmmword(unsigned s, bx_address laddr, const BxPackedZmmRe
BX_CPU_C::write_linear_zmmword_aligned(unsigned s, bx_address laddr, const BxPackedZmmRegister *data)
{
bx_address lpf = AlignedAccessLPFOf(laddr, 63);
bx_TLB_entry *tlbEntry = BX_TLB_ENTRY_OF(laddr, 0);
bx_TLB_entry *tlbEntry = BX_DTLB_ENTRY_OF(laddr, 0);
if (tlbEntry->lpf == lpf) {
// See if the TLB entry privilege level allows us write access
// from this CPL.
@ -311,7 +311,7 @@ BX_CPU_C::write_linear_zmmword_aligned(unsigned s, bx_address laddr, const BxPac
BX_CPU_C::tickle_read_linear(unsigned s, bx_address laddr)
{
bx_address lpf = LPFOf(laddr);
bx_TLB_entry *tlbEntry = BX_TLB_ENTRY_OF(laddr, 0);
bx_TLB_entry *tlbEntry = BX_DTLB_ENTRY_OF(laddr, 0);
if (tlbEntry->lpf == lpf) {
// See if the TLB entry privilege level allows us read access
// from this CPL.
@ -343,7 +343,7 @@ BX_CPU_C::read_linear_byte(unsigned s, bx_address laddr)
Bit8u data;
bx_address lpf = LPFOf(laddr);
bx_TLB_entry *tlbEntry = BX_TLB_ENTRY_OF(laddr, 0);
bx_TLB_entry *tlbEntry = BX_DTLB_ENTRY_OF(laddr, 0);
if (tlbEntry->lpf == lpf) {
// See if the TLB entry privilege level allows us read access
// from this CPL.
@ -368,7 +368,7 @@ BX_CPU_C::read_linear_word(unsigned s, bx_address laddr)
{
Bit16u data;
bx_TLB_entry *tlbEntry = BX_TLB_ENTRY_OF(laddr, 1);
bx_TLB_entry *tlbEntry = BX_DTLB_ENTRY_OF(laddr, 1);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
bx_address lpf = AlignedAccessLPFOf(laddr, (1 & BX_CPU_THIS_PTR alignment_check_mask));
#else
@ -398,7 +398,7 @@ BX_CPU_C::read_linear_dword(unsigned s, bx_address laddr)
{
Bit32u data;
bx_TLB_entry *tlbEntry = BX_TLB_ENTRY_OF(laddr, 3);
bx_TLB_entry *tlbEntry = BX_DTLB_ENTRY_OF(laddr, 3);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
bx_address lpf = AlignedAccessLPFOf(laddr, (3 & BX_CPU_THIS_PTR alignment_check_mask));
#else
@ -428,7 +428,7 @@ BX_CPU_C::read_linear_qword(unsigned s, bx_address laddr)
{
Bit64u data;
bx_TLB_entry *tlbEntry = BX_TLB_ENTRY_OF(laddr, 7);
bx_TLB_entry *tlbEntry = BX_DTLB_ENTRY_OF(laddr, 7);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
bx_address lpf = AlignedAccessLPFOf(laddr, (7 & BX_CPU_THIS_PTR alignment_check_mask));
#else
@ -458,7 +458,7 @@ BX_CPU_C::read_linear_qword(unsigned s, bx_address laddr)
void BX_CPP_AttrRegparmN(3)
BX_CPU_C::read_linear_xmmword(unsigned s, bx_address laddr, BxPackedXmmRegister *data)
{
bx_TLB_entry *tlbEntry = BX_TLB_ENTRY_OF(laddr, 15);
bx_TLB_entry *tlbEntry = BX_DTLB_ENTRY_OF(laddr, 15);
bx_address lpf = LPFOf(laddr);
if (tlbEntry->lpf == lpf) {
// See if the TLB entry privilege level allows us read access
@ -482,7 +482,7 @@ BX_CPU_C::read_linear_xmmword(unsigned s, bx_address laddr, BxPackedXmmRegister
BX_CPU_C::read_linear_xmmword_aligned(unsigned s, bx_address laddr, BxPackedXmmRegister *data)
{
bx_address lpf = AlignedAccessLPFOf(laddr, 15);
bx_TLB_entry *tlbEntry = BX_TLB_ENTRY_OF(laddr, 0);
bx_TLB_entry *tlbEntry = BX_DTLB_ENTRY_OF(laddr, 0);
if (tlbEntry->lpf == lpf) {
// See if the TLB entry privilege level allows us read access
// from this CPL.
@ -509,7 +509,7 @@ BX_CPU_C::read_linear_xmmword_aligned(unsigned s, bx_address laddr, BxPackedXmmR
void BX_CPP_AttrRegparmN(3)
BX_CPU_C::read_linear_ymmword(unsigned s, bx_address laddr, BxPackedYmmRegister *data)
{
bx_TLB_entry *tlbEntry = BX_TLB_ENTRY_OF(laddr, 31);
bx_TLB_entry *tlbEntry = BX_DTLB_ENTRY_OF(laddr, 31);
bx_address lpf = LPFOf(laddr);
if (tlbEntry->lpf == lpf) {
// See if the TLB entry privilege level allows us read access
@ -534,7 +534,7 @@ BX_CPU_C::read_linear_ymmword(unsigned s, bx_address laddr, BxPackedYmmRegister
BX_CPU_C::read_linear_ymmword_aligned(unsigned s, bx_address laddr, BxPackedYmmRegister *data)
{
bx_address lpf = AlignedAccessLPFOf(laddr, 31);
bx_TLB_entry *tlbEntry = BX_TLB_ENTRY_OF(laddr, 0);
bx_TLB_entry *tlbEntry = BX_DTLB_ENTRY_OF(laddr, 0);
if (tlbEntry->lpf == lpf) {
// See if the TLB entry privilege level allows us read access
// from this CPL.
@ -562,7 +562,7 @@ BX_CPU_C::read_linear_ymmword_aligned(unsigned s, bx_address laddr, BxPackedYmmR
void BX_CPP_AttrRegparmN(3)
BX_CPU_C::read_linear_zmmword(unsigned s, bx_address laddr, BxPackedZmmRegister *data)
{
bx_TLB_entry *tlbEntry = BX_TLB_ENTRY_OF(laddr, 63);
bx_TLB_entry *tlbEntry = BX_DTLB_ENTRY_OF(laddr, 63);
bx_address lpf = LPFOf(laddr);
if (tlbEntry->lpf == lpf) {
// See if the TLB entry privilege level allows us read access
@ -587,7 +587,7 @@ BX_CPU_C::read_linear_zmmword(unsigned s, bx_address laddr, BxPackedZmmRegister
BX_CPU_C::read_linear_zmmword_aligned(unsigned s, bx_address laddr, BxPackedZmmRegister *data)
{
bx_address lpf = AlignedAccessLPFOf(laddr, 63);
bx_TLB_entry *tlbEntry = BX_TLB_ENTRY_OF(laddr, 0);
bx_TLB_entry *tlbEntry = BX_DTLB_ENTRY_OF(laddr, 0);
if (tlbEntry->lpf == lpf) {
// See if the TLB entry privilege level allows us read access
// from this CPL.
@ -624,7 +624,7 @@ BX_CPU_C::read_RMW_linear_byte(unsigned s, bx_address laddr)
{
Bit8u data;
bx_address lpf = LPFOf(laddr);
bx_TLB_entry *tlbEntry = BX_TLB_ENTRY_OF(laddr, 0);
bx_TLB_entry *tlbEntry = BX_DTLB_ENTRY_OF(laddr, 0);
if (tlbEntry->lpf == lpf) {
// See if the TLB entry privilege level allows us write access
// from this CPL.
@ -655,7 +655,7 @@ BX_CPU_C::read_RMW_linear_byte(unsigned s, bx_address laddr)
BX_CPU_C::read_RMW_linear_word(unsigned s, bx_address laddr)
{
Bit16u data;
bx_TLB_entry *tlbEntry = BX_TLB_ENTRY_OF(laddr, 1);
bx_TLB_entry *tlbEntry = BX_DTLB_ENTRY_OF(laddr, 1);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
bx_address lpf = AlignedAccessLPFOf(laddr, (1 & BX_CPU_THIS_PTR alignment_check_mask));
#else
@ -691,7 +691,7 @@ BX_CPU_C::read_RMW_linear_word(unsigned s, bx_address laddr)
BX_CPU_C::read_RMW_linear_dword(unsigned s, bx_address laddr)
{
Bit32u data;
bx_TLB_entry *tlbEntry = BX_TLB_ENTRY_OF(laddr, 3);
bx_TLB_entry *tlbEntry = BX_DTLB_ENTRY_OF(laddr, 3);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
bx_address lpf = AlignedAccessLPFOf(laddr, (3 & BX_CPU_THIS_PTR alignment_check_mask));
#else
@ -727,7 +727,7 @@ BX_CPU_C::read_RMW_linear_dword(unsigned s, bx_address laddr)
BX_CPU_C::read_RMW_linear_qword(unsigned s, bx_address laddr)
{
Bit64u data;
bx_TLB_entry *tlbEntry = BX_TLB_ENTRY_OF(laddr, 7);
bx_TLB_entry *tlbEntry = BX_DTLB_ENTRY_OF(laddr, 7);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
bx_address lpf = AlignedAccessLPFOf(laddr, (7 & BX_CPU_THIS_PTR alignment_check_mask));
#else
@ -921,7 +921,7 @@ BX_CPU_C::write_RMW_linear_qword(Bit64u val64)
void BX_CPU_C::read_RMW_linear_dqword_aligned_64(unsigned s, bx_address laddr, Bit64u *hi, Bit64u *lo)
{
bx_address lpf = AlignedAccessLPFOf(laddr, 15);
bx_TLB_entry *tlbEntry = BX_TLB_ENTRY_OF(laddr, 0);
bx_TLB_entry *tlbEntry = BX_DTLB_ENTRY_OF(laddr, 0);
if (tlbEntry->lpf == lpf) {
// See if the TLB entry privilege level allows us write access
// from this CPL.
@ -983,7 +983,7 @@ void BX_CPU_C::write_RMW_linear_dqword(Bit64u hi, Bit64u lo)
void BX_CPU_C::write_new_stack_word(bx_address laddr, unsigned curr_pl, Bit16u data)
{
bx_bool user = (curr_pl == 3);
bx_TLB_entry *tlbEntry = BX_TLB_ENTRY_OF(laddr, 1);
bx_TLB_entry *tlbEntry = BX_DTLB_ENTRY_OF(laddr, 1);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
bx_address lpf = AlignedAccessLPFOf(laddr, (1 & BX_CPU_THIS_PTR alignment_check_mask));
#else
@ -1011,7 +1011,7 @@ void BX_CPU_C::write_new_stack_word(bx_address laddr, unsigned curr_pl, Bit16u d
void BX_CPU_C::write_new_stack_dword(bx_address laddr, unsigned curr_pl, Bit32u data)
{
bx_bool user = (curr_pl == 3);
bx_TLB_entry *tlbEntry = BX_TLB_ENTRY_OF(laddr, 3);
bx_TLB_entry *tlbEntry = BX_DTLB_ENTRY_OF(laddr, 3);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
bx_address lpf = AlignedAccessLPFOf(laddr, (3 & BX_CPU_THIS_PTR alignment_check_mask));
#else
@ -1039,7 +1039,7 @@ void BX_CPU_C::write_new_stack_dword(bx_address laddr, unsigned curr_pl, Bit32u
void BX_CPU_C::write_new_stack_qword(bx_address laddr, unsigned curr_pl, Bit64u data)
{
bx_bool user = (curr_pl == 3);
bx_TLB_entry *tlbEntry = BX_TLB_ENTRY_OF(laddr, 7);
bx_TLB_entry *tlbEntry = BX_DTLB_ENTRY_OF(laddr, 7);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
bx_address lpf = AlignedAccessLPFOf(laddr, (7 & BX_CPU_THIS_PTR alignment_check_mask));
#else

View File

@ -590,10 +590,10 @@ void BX_CPU_C::prefetch(void)
BX_CPU_THIS_PTR clear_RF();
bx_address lpf = LPFOf(laddr);
bx_TLB_entry *tlbEntry = BX_TLB_ENTRY_OF(laddr, 0);
bx_TLB_entry *tlbEntry = BX_ITLB_ENTRY_OF(laddr, 0);
Bit8u *fetchPtr = 0;
if ((tlbEntry->lpf == lpf) && (tlbEntry->accessBits & (0x10 << USER_PL)) != 0) {
if ((tlbEntry->lpf == lpf) && (tlbEntry->accessBits & (1<<USER_PL)) != 0) {
BX_CPU_THIS_PTR pAddrFetchPage = tlbEntry->ppf;
fetchPtr = (Bit8u*) tlbEntry->hostPageAddr;
}
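The mask change in this fetch path follows from the ITLB getting its own, smaller accessBits encoding (declared at the end of this commit): execute permissions now sit in bits 0 and 1 instead of the unified TLB's 0x10/0x20, so the check shrinks from (0x10 << USER_PL) to (1 << USER_PL). A self-contained restatement of that check (illustrative helper, not a Bochs function):

```cpp
// Mirrors the new ITLB test in prefetch(): accessBits bit 0 = supervisor
// execute OK, bit 1 = user execute OK, and USER_PL is 1 at CPL 3, else 0.
#include <cstdint>

static inline bool itlb_allows_fetch(uint32_t accessBits, unsigned user_pl)
{
    return (accessBits & (1u << user_pl)) != 0;
}
```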

View File

@ -1167,8 +1167,10 @@ public: // for now...
#define BX_INSTR_FAR_BRANCH_ORIGIN()
#endif
#define BX_TLB_SIZE 2048
TLB<BX_TLB_SIZE> TLB BX_CPP_AlignN(16);
#define BX_DTLB_SIZE 2048
#define BX_ITLB_SIZE 2048
TLB<BX_DTLB_SIZE> DTLB BX_CPP_AlignN(16);
TLB<BX_ITLB_SIZE> ITLB BX_CPP_AlignN(16);
#if BX_CPU_LEVEL >= 6
struct {

View File

@ -483,22 +483,41 @@ void BX_CPU_C::register_state(void)
BXRS_PARAM_BOOL(cpu, in_smm, in_smm);
#if BX_DEBUGGER
bx_list_c *tlb = new bx_list_c(cpu, "TLB");
bx_list_c *dtlb = new bx_list_c(cpu, "DTLB");
#if BX_CPU_LEVEL >= 5
BXRS_PARAM_BOOL(tlb, split_large, TLB.split_large);
BXRS_PARAM_BOOL(dtlb, split_large, DTLB.split_large);
#endif
for (n=0; n<BX_TLB_SIZE; n++) {
for (n=0; n<BX_DTLB_SIZE; n++) {
sprintf(name, "entry%u", n);
bx_list_c *tlb_entry = new bx_list_c(tlb, name);
BXRS_HEX_PARAM_FIELD(tlb_entry, lpf, TLB.entry[n].lpf);
BXRS_HEX_PARAM_FIELD(tlb_entry, lpf_mask, TLB.entry[n].lpf_mask);
BXRS_HEX_PARAM_FIELD(tlb_entry, ppf, TLB.entry[n].ppf);
BXRS_HEX_PARAM_FIELD(tlb_entry, accessBits, TLB.entry[n].accessBits);
bx_list_c *tlb_entry = new bx_list_c(dtlb, name);
BXRS_HEX_PARAM_FIELD(tlb_entry, lpf, DTLB.entry[n].lpf);
BXRS_HEX_PARAM_FIELD(tlb_entry, lpf_mask, DTLB.entry[n].lpf_mask);
BXRS_HEX_PARAM_FIELD(tlb_entry, ppf, DTLB.entry[n].ppf);
BXRS_HEX_PARAM_FIELD(tlb_entry, accessBits, DTLB.entry[n].accessBits);
#if BX_SUPPORT_PKEYS
BXRS_HEX_PARAM_FIELD(tlb_entry, pkey, TLB.entry[n].pkey);
BXRS_HEX_PARAM_FIELD(tlb_entry, pkey, DTLB.entry[n].pkey);
#endif
#if BX_SUPPORT_MEMTYPE
BXRS_HEX_PARAM_FIELD(tlb_entry, memtype, TLB.entry[n].memtype);
BXRS_HEX_PARAM_FIELD(tlb_entry, memtype, DTLB.entry[n].memtype);
#endif
}
bx_list_c *itlb = new bx_list_c(cpu, "ITLB");
#if BX_CPU_LEVEL >= 5
BXRS_PARAM_BOOL(itlb, split_large, ITLB.split_large);
#endif
for (n=0; n<BX_ITLB_SIZE; n++) {
sprintf(name, "entry%u", n);
bx_list_c *tlb_entry = new bx_list_c(itlb, name);
BXRS_HEX_PARAM_FIELD(tlb_entry, lpf, ITLB.entry[n].lpf);
BXRS_HEX_PARAM_FIELD(tlb_entry, lpf_mask, ITLB.entry[n].lpf_mask);
BXRS_HEX_PARAM_FIELD(tlb_entry, ppf, ITLB.entry[n].ppf);
BXRS_HEX_PARAM_FIELD(tlb_entry, accessBits, ITLB.entry[n].accessBits);
#if BX_SUPPORT_PKEYS
BXRS_HEX_PARAM_FIELD(tlb_entry, pkey, ITLB.entry[n].pkey);
#endif
#if BX_SUPPORT_MEMTYPE
BXRS_HEX_PARAM_FIELD(tlb_entry, memtype, ITLB.entry[n].memtype);
#endif
}
#endif

View File

@ -344,7 +344,7 @@ const Bit64u BX_CR3_PAGING_MASK = BX_CONST64(0x000ffffffffff000);
// result when the direct access is not allowed.
//
#define TLB_NoHostPtr (0x800) /* set this bit when direct access is NOT allowed */
const Bit32u TLB_NoHostPtr = 0x800; /* set this bit when direct access is NOT allowed */
#include "cpustats.h"
@ -357,7 +357,8 @@ void BX_CPU_C::TLB_flush(void)
invalidate_prefetch_q();
invalidate_stack_cache();
BX_CPU_THIS_PTR TLB.flush();
BX_CPU_THIS_PTR DTLB.flush();
BX_CPU_THIS_PTR ITLB.flush();
#if BX_SUPPORT_MONITOR_MWAIT
// invalidating of the TLB might change translation for monitored page
@ -377,7 +378,8 @@ void BX_CPU_C::TLB_flushNonGlobal(void)
invalidate_prefetch_q();
invalidate_stack_cache();
BX_CPU_THIS_PTR TLB.flushNonGlobal();
BX_CPU_THIS_PTR DTLB.flushNonGlobal();
BX_CPU_THIS_PTR ITLB.flushNonGlobal();
#if BX_SUPPORT_MONITOR_MWAIT
// invalidating of the TLB might change translation for monitored page
@ -396,7 +398,8 @@ void BX_CPU_C::TLB_invlpg(bx_address laddr)
invalidate_stack_cache();
BX_DEBUG(("TLB_invlpg(0x" FMT_ADDRX "): invalidate TLB entry", laddr));
BX_CPU_THIS_PTR TLB.invlpg(laddr);
BX_CPU_THIS_PTR DTLB.invlpg(laddr);
BX_CPU_THIS_PTR ITLB.invlpg(laddr);
#if BX_SUPPORT_MONITOR_MWAIT
// invalidating of the TLB entry might change translation for monitored
@ -1147,7 +1150,7 @@ bx_phy_address BX_CPU_C::translate_linear(bx_TLB_entry *tlbEntry, bx_address lad
return paddress;
}
#else
if (tlbEntry->accessBits & (1 << (/*(isExecute<<2) |*/ (isWrite<<1) | user)))
if (tlbEntry->accessBits & (1 << ((isWrite<<1) | user)))
return paddress;
#endif
@ -1198,8 +1201,12 @@ bx_phy_address BX_CPU_C::translate_linear(bx_TLB_entry *tlbEntry, bx_address lad
paddress = (paddress & ~((Bit64u) lpf_mask)) | (laddr & lpf_mask);
#if BX_CPU_LEVEL >= 5
if (lpf_mask > 0xfff)
BX_CPU_THIS_PTR TLB.split_large = 1;
if (lpf_mask > 0xfff) {
if (isExecute)
BX_CPU_THIS_PTR ITLB.split_large = true;
else
BX_CPU_THIS_PTR DTLB.split_large = true;
}
#endif
}
else {
@ -1233,11 +1240,14 @@ bx_phy_address BX_CPU_C::translate_linear(bx_TLB_entry *tlbEntry, bx_address lad
tlbEntry->ppf = ppf;
tlbEntry->accessBits = 0;
tlbEntry->accessBits |= TLB_SysReadOK;
if (isWrite)
tlbEntry->accessBits |= TLB_SysWriteOK;
if (isExecute)
if (isExecute) {
tlbEntry->accessBits |= TLB_SysExecuteOK;
}
else {
tlbEntry->accessBits |= TLB_SysReadOK;
if (isWrite)
tlbEntry->accessBits |= TLB_SysWriteOK;
}
if (! BX_CPU_THIS_PTR cr0.get_PG()
#if BX_SUPPORT_VMX >= 2
@ -1247,29 +1257,35 @@ bx_phy_address BX_CPU_C::translate_linear(bx_TLB_entry *tlbEntry, bx_address lad
&& ! (BX_CPU_THIS_PTR in_svm_guest && SVM_NESTED_PAGING_ENABLED)
#endif
) {
tlbEntry->accessBits |= TLB_UserReadOK |
TLB_UserWriteOK |
TLB_UserExecuteOK;
if (isExecute)
tlbEntry->accessBits |= TLB_UserExecuteOK;
else
tlbEntry->accessBits |= TLB_UserReadOK | TLB_UserWriteOK;
}
else {
if ((combined_access & 4) != 0) { // User Page
if (user) {
tlbEntry->accessBits |= TLB_UserReadOK;
if (isWrite)
tlbEntry->accessBits |= TLB_UserWriteOK;
if (isExecute)
if (isExecute) {
tlbEntry->accessBits |= TLB_UserExecuteOK;
}
else {
tlbEntry->accessBits |= TLB_UserReadOK;
if (isWrite)
tlbEntry->accessBits |= TLB_UserWriteOK;
}
}
#if BX_CPU_LEVEL >= 6
if (BX_CPU_THIS_PTR cr4.get_SMEP())
tlbEntry->accessBits &= ~TLB_SysExecuteOK;
if (BX_CPU_THIS_PTR cr4.get_SMAP())
tlbEntry->accessBits &= ~(TLB_SysReadOK | TLB_SysWriteOK);
if (isExecute) {
if (BX_CPU_THIS_PTR cr4.get_SMEP())
tlbEntry->accessBits &= ~TLB_SysExecuteOK;
}
else {
if (BX_CPU_THIS_PTR cr4.get_SMAP())
tlbEntry->accessBits &= ~(TLB_SysReadOK | TLB_SysWriteOK);
}
#endif
}
}
@ -2160,7 +2176,7 @@ int BX_CPU_C::access_write_linear(bx_address laddr, unsigned len, unsigned curr_
bx_bool user = (curr_pl == 3);
bx_TLB_entry *tlbEntry = BX_TLB_ENTRY_OF(laddr, 0);
bx_TLB_entry *tlbEntry = BX_DTLB_ENTRY_OF(laddr, 0);
#if BX_SUPPORT_X86_64
if (! IsCanonical(laddr)) {
@ -2212,7 +2228,7 @@ int BX_CPU_C::access_write_linear(bx_address laddr, unsigned len, unsigned curr_
}
#endif
bx_TLB_entry *tlbEntry2 = BX_TLB_ENTRY_OF(laddr2, 0);
bx_TLB_entry *tlbEntry2 = BX_DTLB_ENTRY_OF(laddr2, 0);
BX_CPU_THIS_PTR address_xlation.paddress1 = translate_linear(tlbEntry, laddr, user, BX_WRITE);
BX_CPU_THIS_PTR address_xlation.paddress2 = translate_linear(tlbEntry2, laddr2, user, BX_WRITE);
@ -2280,7 +2296,7 @@ int BX_CPU_C::access_read_linear(bx_address laddr, unsigned len, unsigned curr_p
}
#endif
bx_TLB_entry *tlbEntry = BX_TLB_ENTRY_OF(laddr, 0);
bx_TLB_entry *tlbEntry = BX_DTLB_ENTRY_OF(laddr, 0);
/* check for reference across multiple pages */
if ((pageOffset + len) <= 4096) {
@ -2313,7 +2329,7 @@ int BX_CPU_C::access_read_linear(bx_address laddr, unsigned len, unsigned curr_p
}
#endif
bx_TLB_entry *tlbEntry2 = BX_TLB_ENTRY_OF(laddr2, 0);
bx_TLB_entry *tlbEntry2 = BX_DTLB_ENTRY_OF(laddr2, 0);
BX_CPU_THIS_PTR address_xlation.paddress1 = translate_linear(tlbEntry, laddr, user, xlate_rw);
BX_CPU_THIS_PTR address_xlation.paddress2 = translate_linear(tlbEntry2, laddr2, user, xlate_rw);
@ -2412,14 +2428,24 @@ bx_hostpageaddr_t BX_CPU_C::getHostMemAddr(bx_phy_address paddr, unsigned rw)
#if BX_LARGE_RAMFILE
bx_bool BX_CPU_C::check_addr_in_tlb_buffers(const Bit8u *addr, const Bit8u *end)
{
for (unsigned tlb_entry_num=0; tlb_entry_num < BX_TLB_SIZE; tlb_entry_num++) {
bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlb_entry_num];
for (unsigned tlb_entry_num=0; tlb_entry_num < BX_DTLB_SIZE; tlb_entry_num++) {
bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR DTLB.entry[tlb_entry_num];
if (tlbEntry->valid()) {
if (((tlbEntry->hostPageAddr) >= (const bx_hostpageaddr_t)addr) &&
((tlbEntry->hostPageAddr) < (const bx_hostpageaddr_t)end))
return true;
}
}
for (unsigned tlb_entry_num=0; tlb_entry_num < BX_ITLB_SIZE; tlb_entry_num++) {
bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR ITLB.entry[tlb_entry_num];
if (tlbEntry->valid()) {
if (((tlbEntry->hostPageAddr) >= (const bx_hostpageaddr_t)addr) &&
((tlbEntry->hostPageAddr) < (const bx_hostpageaddr_t)end))
return true;
}
}
return false;
}
#endif

View File

@ -108,7 +108,7 @@ void BX_CPP_AttrRegparmN(2) BX_CPU_C::stackPrefetch(bx_address offset, unsigned
}
Bit64u lpf = LPFOf(laddr);
bx_TLB_entry *tlbEntry = BX_TLB_ENTRY_OF(laddr, 0);
bx_TLB_entry *tlbEntry = BX_DTLB_ENTRY_OF(laddr, 0);
if (tlbEntry->lpf == lpf) {
// See if the TLB entry privilege level allows us write access from this CPL
// Assuming that we always can read if write access is OK

View File

@ -196,8 +196,8 @@ void BX_CPU_C::task_switch(bxInstruction_c *i, bx_selector_t *tss_selector,
// used in the task switch are paged in.
if (BX_CPU_THIS_PTR cr0.get_PG())
{
translate_linear(BX_TLB_ENTRY_OF(nbase32, 0), nbase32, 0, BX_READ); // old TSS
translate_linear(BX_TLB_ENTRY_OF(nbase32 + new_TSS_max, 0), nbase32 + new_TSS_max, 0, BX_READ);
translate_linear(BX_DTLB_ENTRY_OF(nbase32, 0), nbase32, 0, BX_READ); // old TSS
translate_linear(BX_DTLB_ENTRY_OF(nbase32 + new_TSS_max, 0), nbase32 + new_TSS_max, 0, BX_READ);
// ??? Humm, we check the new TSS region with READ above,
// but sometimes we need to write the link field in that
@ -208,8 +208,8 @@ void BX_CPU_C::task_switch(bxInstruction_c *i, bx_selector_t *tss_selector,
if (source == BX_TASK_FROM_CALL || source == BX_TASK_FROM_INT)
{
translate_linear(BX_TLB_ENTRY_OF(nbase32, 0), nbase32, 0, BX_WRITE);
translate_linear(BX_TLB_ENTRY_OF(nbase32 + 1, 0), nbase32 + 1, 0, BX_WRITE);
translate_linear(BX_DTLB_ENTRY_OF(nbase32, 0), nbase32, 0, BX_WRITE);
translate_linear(BX_DTLB_ENTRY_OF(nbase32 + 1, 0), nbase32 + 1, 0, BX_WRITE);
}
}
@ -252,8 +252,8 @@ void BX_CPU_C::task_switch(bxInstruction_c *i, bx_selector_t *tss_selector,
if (BX_CPU_THIS_PTR cr0.get_PG()) {
Bit32u start = Bit32u(obase32 + 14), end = Bit32u(obase32 + 41);
translate_linear(BX_TLB_ENTRY_OF(start, 0), start, 0, BX_WRITE);
translate_linear(BX_TLB_ENTRY_OF(end, 0), end, 0, BX_WRITE);
translate_linear(BX_DTLB_ENTRY_OF(start, 0), start, 0, BX_WRITE);
translate_linear(BX_DTLB_ENTRY_OF(end, 0), end, 0, BX_WRITE);
}
system_write_word(Bit32u(obase32 + 14), IP);
@ -281,8 +281,8 @@ void BX_CPU_C::task_switch(bxInstruction_c *i, bx_selector_t *tss_selector,
if (BX_CPU_THIS_PTR cr0.get_PG()) {
Bit32u start = Bit32u(obase32 + 0x20), end = Bit32u(obase32 + 0x5d);
translate_linear(BX_TLB_ENTRY_OF(start, 0), start, 0, BX_WRITE);
translate_linear(BX_TLB_ENTRY_OF(end, 0), end, 0, BX_WRITE);
translate_linear(BX_DTLB_ENTRY_OF(start, 0), start, 0, BX_WRITE);
translate_linear(BX_DTLB_ENTRY_OF(end, 0), end, 0, BX_WRITE);
}
system_write_dword(Bit32u(obase32 + 0x20), EIP);

View File

@ -55,8 +55,11 @@ BX_CPP_INLINE bx_address AlignedAccessLPFOf(bx_address laddr, unsigned alignment
// There will be a many-to-one mapping to each TLB cache slot.
// When there are collisions, the old entry is overwritten with
// one for the newest access.
#define BX_TLB_ENTRY_OF(lpf, len) (BX_CPU_THIS_PTR TLB.get_entry_of((lpf), (len)))
#define BX_TLB_INDEX_OF(lpf, len) (BX_CPU_THIS_PTR TLB.get_index_of((lpf), (len)))
#define BX_DTLB_ENTRY_OF(lpf, len) (BX_CPU_THIS_PTR DTLB.get_entry_of((lpf), (len)))
#define BX_DTLB_INDEX_OF(lpf, len) (BX_CPU_THIS_PTR DTLB.get_index_of((lpf), (len)))
#define BX_ITLB_ENTRY_OF(lpf, len) (BX_CPU_THIS_PTR ITLB.get_entry_of((lpf), (len)))
#define BX_ITLB_INDEX_OF(lpf, len) (BX_CPU_THIS_PTR ITLB.get_index_of((lpf), (len)))
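As the comment above notes, each cache is direct-mapped: many linear pages map to one slot, and a newer translation simply overwrites the older one on collision. The exact index computation lives in the TLB<> template's get_index_of(); the sketch below shows what such a direct-mapped index typically looks like (an assumption for illustration, not the literal Bochs formula):

```cpp
// Direct-mapped index sketch: pick a slot from the linear page frame.
// kTlbSize matches BX_DTLB_SIZE / BX_ITLB_SIZE (2048) from this commit.
#include <cstdint>

static const unsigned kTlbSize = 2048;

static inline unsigned tlb_index_of(uint64_t laddr, unsigned len)
{
    // Adding `len` lets an access that spills into the next page select that
    // page's slot; masking keeps the index inside the table.
    return (unsigned)(((laddr + len) >> 12) & (kTlbSize - 1));
}
```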
typedef bx_ptr_equiv_t bx_hostpageaddr_t;
@ -66,13 +69,15 @@ const bx_address BX_INVALID_TLB_ENTRY = BX_CONST64(0xffffffffffffffff);
const bx_address BX_INVALID_TLB_ENTRY = 0xffffffff;
#endif
// accessBits
// accessBits in DTLB
const Bit32u TLB_SysReadOK = 0x01;
const Bit32u TLB_UserReadOK = 0x02;
const Bit32u TLB_SysWriteOK = 0x04;
const Bit32u TLB_UserWriteOK = 0x08;
const Bit32u TLB_SysExecuteOK = 0x10;
const Bit32u TLB_UserExecuteOK = 0x20;
// accessBits in ITLB
const Bit32u TLB_SysExecuteOK = 0x01;
const Bit32u TLB_UserExecuteOK = 0x02;
// global
const Bit32u TLB_GlobalPage = 0x80000000;
#if BX_SUPPORT_PKEYS
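Taken together, the two encodings above are what the rest of the diff tests against: translate_linear() checks a DTLB entry with (1 << ((isWrite<<1) | user)), and prefetch() checks an ITLB entry with (1 << USER_PL), as sketched earlier. A restatement of the DTLB-side check (illustrative helper, not a Bochs function):

```cpp
// DTLB accessBits layout from above: bit 0 SysRead, bit 1 UserRead,
// bit 2 SysWrite, bit 3 UserWrite. The mask (1 << ((isWrite << 1) | user))
// therefore selects exactly the bit for this access type and privilege.
#include <cstdint>

static inline bool dtlb_access_ok(uint32_t accessBits, bool isWrite, bool user)
{
    unsigned bit = ((unsigned)isWrite << 1) | (unsigned)user;
    return (accessBits & (1u << bit)) != 0;
}
```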