Speed up simulation by eliminating CPL==3 check from read/write_virtual* functions

Stanislav Shwartsman 2007-12-16 21:03:46 +00:00
parent de5838ce80
commit 46366b5064
9 changed files with 202 additions and 213 deletions
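In essence, the hot-path TLB permission test no longer folds CPL into a single user/system bit on every guest memory access; accessBits now carries one permission bit per CPL value, so CPL indexes the mask directly. A minimal stand-alone sketch of the before/after write test, distilled from the access.cc and paging.cc hunks below (the helper names, the Bit32u stand-in and the main() driver are illustrative only, not Bochs code):

#include <cstdio>

typedef unsigned Bit32u;   // stand-in for the Bochs typedef

// Old scheme: every access computed pl = (CPL==3) and tested one of two bits.
static bool write_ok_old(Bit32u accessBits, unsigned cpl)
{
  unsigned pl = (cpl == 3);                  // the per-access check this commit removes
  return (accessBits & (0x04 << pl)) != 0;   // write-OK bits: 0x04 (system), 0x08 (user)
}

// New scheme: one write-OK bit per CPL (0x10..0x80), indexed by CPL directly.
static bool write_ok_new(Bit32u accessBits, unsigned cpl)
{
  return (accessBits & (0x10 << cpl)) != 0;
}

int main()
{
  Bit32u oldBits = 0x04 | 0x08;   // old layout: writable from system and user
  Bit32u newBits = 0xf0;          // new layout: writable from CPL 0..3
  for (unsigned cpl = 0; cpl < 4; cpl++)
    printf("CPL=%u  old:%d  new:%d\n", cpl,
           (int)write_ok_old(oldBits, cpl), (int)write_ok_new(newBits, cpl));
  return 0;
}

The saving is the removed compare per access; the write_new_stack_* helpers likewise now take the target privilege level (cs_descriptor.dpl) directly instead of a precomputed user flag.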

access.cc

@ -1,5 +1,5 @@
/////////////////////////////////////////////////////////////////////////
// $Id: access.cc,v 1.82 2007-12-16 20:47:09 sshwarts Exp $
// $Id: access.cc,v 1.83 2007-12-16 21:03:45 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
// Copyright (C) 2001 MandrakeSoft S.A.
@ -335,7 +335,7 @@ int BX_CPU_C::int_number(bx_segment_reg_t *seg)
#if BX_SupportGuest2HostTLB
Bit8u* BX_CPP_AttrRegparmN(2)
BX_CPU_C::v2h_read_byte(bx_address laddr, unsigned pl)
BX_CPU_C::v2h_read_byte(bx_address laddr, unsigned curr_pl)
{
Bit32u tlbIndex = BX_TLB_INDEX_OF(laddr);
bx_address lpf = LPFOf(laddr);
@ -343,7 +343,7 @@ BX_CPU_C::v2h_read_byte(bx_address laddr, unsigned pl)
if (tlbEntry->lpf == lpf) {
// See if the TLB entry privilege level allows us read access
// from this CPL.
if (tlbEntry->accessBits & (1<<pl)) { // Read this pl OK.
if (tlbEntry->accessBits & (1<<curr_pl)) { // Read this pl OK.
bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
Bit32u pageOffset = laddr & 0xfff;
Bit8u *hostAddr = (Bit8u*) (hostPageAddr | pageOffset);
@ -355,7 +355,7 @@ BX_CPU_C::v2h_read_byte(bx_address laddr, unsigned pl)
}
Bit8u* BX_CPP_AttrRegparmN(2)
BX_CPU_C::v2h_write_byte(bx_address laddr, unsigned pl)
BX_CPU_C::v2h_write_byte(bx_address laddr, unsigned curr_pl)
{
Bit32u tlbIndex = BX_TLB_INDEX_OF(laddr);
bx_address lpf = LPFOf(laddr);
@ -364,7 +364,7 @@ BX_CPU_C::v2h_write_byte(bx_address laddr, unsigned pl)
{
// See if the TLB entry privilege level allows us write access
// from this CPL.
if (tlbEntry->accessBits & (0x04 << pl)) {
if (tlbEntry->accessBits & (0x10 << curr_pl)) {
bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
Bit32u pageOffset = laddr & 0xfff;
Bit8u *hostAddr = (Bit8u*) (hostPageAddr | pageOffset);
@ -379,7 +379,7 @@ BX_CPU_C::v2h_write_byte(bx_address laddr, unsigned pl)
}
Bit16u* BX_CPP_AttrRegparmN(2)
BX_CPU_C::v2h_read_word(bx_address laddr, unsigned pl)
BX_CPU_C::v2h_read_word(bx_address laddr, unsigned curr_pl)
{
Bit32u pageOffset = laddr & 0xfff;
if (pageOffset <= 0xffe) { // Make sure access does not span 2 pages.
@ -389,7 +389,7 @@ BX_CPU_C::v2h_read_word(bx_address laddr, unsigned pl)
if (tlbEntry->lpf == lpf) {
// See if the TLB entry privilege level allows us read access
// from this CPL.
if (tlbEntry->accessBits & (1<<pl)) { // Read this pl OK.
if (tlbEntry->accessBits & (1<<curr_pl)) { // Read this pl OK.
bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
Bit16u *hostAddr = (Bit16u*) (hostPageAddr | pageOffset);
return hostAddr;
@ -401,7 +401,7 @@ BX_CPU_C::v2h_read_word(bx_address laddr, unsigned pl)
}
Bit16u* BX_CPP_AttrRegparmN(2)
BX_CPU_C::v2h_write_word(bx_address laddr, unsigned pl)
BX_CPU_C::v2h_write_word(bx_address laddr, unsigned curr_pl)
{
Bit32u pageOffset = laddr & 0xfff;
if (pageOffset <= 0xffe) { // Make sure access does not span 2 pages.
@ -412,7 +412,7 @@ BX_CPU_C::v2h_write_word(bx_address laddr, unsigned pl)
{
// See if the TLB entry privilege level allows us write access
// from this CPL.
if (tlbEntry->accessBits & (0x04 << pl)) {
if (tlbEntry->accessBits & (0x10 << curr_pl)) {
bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
Bit16u *hostAddr = (Bit16u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
@ -427,7 +427,7 @@ BX_CPU_C::v2h_write_word(bx_address laddr, unsigned pl)
}
Bit32u* BX_CPP_AttrRegparmN(2)
BX_CPU_C::v2h_read_dword(bx_address laddr, unsigned pl)
BX_CPU_C::v2h_read_dword(bx_address laddr, unsigned curr_pl)
{
Bit32u pageOffset = laddr & 0xfff;
if (pageOffset <= 0xffc) { // Make sure access does not span 2 pages.
@ -437,7 +437,7 @@ BX_CPU_C::v2h_read_dword(bx_address laddr, unsigned pl)
if (tlbEntry->lpf == lpf) {
// See if the TLB entry privilege level allows us read access
// from this CPL.
if (tlbEntry->accessBits & (1<<pl)) { // Read this pl OK.
if (tlbEntry->accessBits & (1<<curr_pl)) { // Read this pl OK.
bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
Bit32u *hostAddr = (Bit32u*) (hostPageAddr | pageOffset);
return hostAddr;
@ -449,7 +449,7 @@ BX_CPU_C::v2h_read_dword(bx_address laddr, unsigned pl)
}
Bit32u* BX_CPP_AttrRegparmN(2)
BX_CPU_C::v2h_write_dword(bx_address laddr, unsigned pl)
BX_CPU_C::v2h_write_dword(bx_address laddr, unsigned curr_pl)
{
Bit32u pageOffset = laddr & 0xfff;
if (pageOffset <= 0xffc) { // Make sure access does not span 2 pages.
@ -460,7 +460,7 @@ BX_CPU_C::v2h_write_dword(bx_address laddr, unsigned pl)
{
// See if the TLB entry privilege level allows us write access
// from this CPL.
if (tlbEntry->accessBits & (0x04 << pl)) {
if (tlbEntry->accessBits & (0x10 << curr_pl)) {
bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
Bit32u *hostAddr = (Bit32u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
@ -475,7 +475,7 @@ BX_CPU_C::v2h_write_dword(bx_address laddr, unsigned pl)
}
Bit64u* BX_CPP_AttrRegparmN(2)
BX_CPU_C::v2h_read_qword(bx_address laddr, unsigned pl)
BX_CPU_C::v2h_read_qword(bx_address laddr, unsigned curr_pl)
{
Bit32u pageOffset = laddr & 0xfff;
if (pageOffset <= 0xff8) { // Make sure access does not span 2 pages.
@ -485,7 +485,7 @@ BX_CPU_C::v2h_read_qword(bx_address laddr, unsigned pl)
if (tlbEntry->lpf == lpf) {
// See if the TLB entry privilege level allows us read access
// from this CPL.
if (tlbEntry->accessBits & (1<<pl)) { // Read this pl OK.
if (tlbEntry->accessBits & (1<<curr_pl)) { // Read this pl OK.
bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
return hostAddr;
@ -497,7 +497,7 @@ BX_CPU_C::v2h_read_qword(bx_address laddr, unsigned pl)
}
Bit64u* BX_CPP_AttrRegparmN(2)
BX_CPU_C::v2h_write_qword(bx_address laddr, unsigned pl)
BX_CPU_C::v2h_write_qword(bx_address laddr, unsigned curr_pl)
{
Bit32u pageOffset = laddr & 0xfff;
if (pageOffset <= 0xff8) { // Make sure access does not span 2 pages.
@ -508,7 +508,7 @@ BX_CPU_C::v2h_write_qword(bx_address laddr, unsigned pl)
{
// See if the TLB entry privilege level allows us write access
// from this CPL.
if (tlbEntry->accessBits & (0x04 << pl)) {
if (tlbEntry->accessBits & (0x10 << curr_pl)) {
bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
@ -530,11 +530,9 @@ BX_CPU_C::write_virtual_byte(unsigned s, bx_address offset, Bit8u *data)
bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];
if ((seg->cache.valid & SegAccessWOK4G) == SegAccessWOK4G) {
unsigned pl;
accessOK:
laddr = BX_CPU_THIS_PTR get_segment_base(s) + offset;
BX_INSTR_MEM_DATA(BX_CPU_ID, laddr, 1, BX_WRITE);
pl = (CPL==3);
#if BX_SupportGuest2HostTLB
Bit32u tlbIndex = BX_TLB_INDEX_OF(laddr);
bx_address lpf = LPFOf(laddr);
@ -542,7 +540,7 @@ accessOK:
if (tlbEntry->lpf == lpf) {
// See if the TLB entry privilege level allows us write access
// from this CPL.
if (tlbEntry->accessBits & (0x04 << pl)) {
if (tlbEntry->accessBits & (0x10 << CPL)) {
bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
Bit32u pageOffset = laddr & 0xfff;
BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 1, BX_WRITE);
@ -561,7 +559,7 @@ accessOK:
exception(int_number(seg), 0, 0);
}
#endif
access_linear(laddr, 1, pl, BX_WRITE, (void *) data);
access_linear(laddr, 1, CPL, BX_WRITE, (void *) data);
return;
}
@ -580,11 +578,9 @@ BX_CPU_C::write_virtual_word(unsigned s, bx_address offset, Bit16u *data)
bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];
if ((seg->cache.valid & SegAccessWOK4G) == SegAccessWOK4G) {
unsigned pl;
accessOK:
laddr = BX_CPU_THIS_PTR get_segment_base(s) + offset;
BX_INSTR_MEM_DATA(BX_CPU_ID, laddr, 2, BX_WRITE);
pl = (CPL==3);
#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
if (BX_CPU_THIS_PTR alignment_check) {
if (laddr & 1) {
@ -602,7 +598,7 @@ accessOK:
if (tlbEntry->lpf == lpf) {
// See if the TLB entry privilege level allows us write access
// from this CPL.
if (tlbEntry->accessBits & (0x04 << pl)) {
if (tlbEntry->accessBits & (0x10 << CPL)) {
bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 2, BX_WRITE);
Bit16u *hostAddr = (Bit16u*) (hostPageAddr | pageOffset);
@ -621,7 +617,7 @@ accessOK:
exception(int_number(seg), 0, 0);
}
#endif
access_linear(laddr, 2, pl, BX_WRITE, (void *) data);
access_linear(laddr, 2, CPL, BX_WRITE, (void *) data);
return;
}
@ -640,11 +636,9 @@ BX_CPU_C::write_virtual_dword(unsigned s, bx_address offset, Bit32u *data)
bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];
if ((seg->cache.valid & SegAccessWOK4G) == SegAccessWOK4G) {
unsigned pl;
accessOK:
laddr = BX_CPU_THIS_PTR get_segment_base(s) + offset;
BX_INSTR_MEM_DATA(BX_CPU_ID, laddr, 4, BX_WRITE);
pl = (CPL==3);
#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
if (BX_CPU_THIS_PTR alignment_check) {
if (laddr & 3) {
@ -662,7 +656,7 @@ accessOK:
if (tlbEntry->lpf == lpf) {
// See if the TLB entry privilege level allows us write access
// from this CPL.
if (tlbEntry->accessBits & (0x04 << pl)) {
if (tlbEntry->accessBits & (0x10 << CPL)) {
bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 4, BX_WRITE);
Bit32u *hostAddr = (Bit32u*) (hostPageAddr | pageOffset);
@ -681,7 +675,7 @@ accessOK:
exception(int_number(seg), 0, 0);
}
#endif
access_linear(laddr, 4, pl, BX_WRITE, (void *) data);
access_linear(laddr, 4, CPL, BX_WRITE, (void *) data);
return;
}
@ -700,11 +694,9 @@ BX_CPU_C::write_virtual_qword(unsigned s, bx_address offset, Bit64u *data)
bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];
if ((seg->cache.valid & SegAccessWOK4G) == SegAccessWOK4G) {
unsigned pl;
accessOK:
laddr = BX_CPU_THIS_PTR get_segment_base(s) + offset;
BX_INSTR_MEM_DATA(BX_CPU_ID, laddr, 8, BX_WRITE);
pl = (CPL==3);
#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
if (BX_CPU_THIS_PTR alignment_check) {
if (laddr & 7) {
@ -722,7 +714,7 @@ accessOK:
if (tlbEntry->lpf == lpf) {
// See if the TLB entry privilege level allows us write access
// from this CPL.
if (tlbEntry->accessBits & (0x04 << pl)) {
if (tlbEntry->accessBits & (0x10 << CPL)) {
bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 8, BX_WRITE);
Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
@ -741,7 +733,7 @@ accessOK:
exception(int_number(seg), 0, 0);
}
#endif
access_linear(laddr, 8, pl, BX_WRITE, (void *) data);
access_linear(laddr, 8, CPL, BX_WRITE, (void *) data);
return;
}
@ -760,11 +752,9 @@ BX_CPU_C::read_virtual_byte(unsigned s, bx_address offset, Bit8u *data)
bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];
if ((seg->cache.valid & SegAccessROK4G) == SegAccessROK4G) {
unsigned pl;
accessOK:
laddr = BX_CPU_THIS_PTR get_segment_base(s) + offset;
BX_INSTR_MEM_DATA(BX_CPU_ID, laddr, 1, BX_READ);
pl = (CPL==3);
#if BX_SupportGuest2HostTLB
Bit32u tlbIndex = BX_TLB_INDEX_OF(laddr);
bx_address lpf = LPFOf(laddr);
@ -772,7 +762,7 @@ accessOK:
if (tlbEntry->lpf == lpf) {
// See if the TLB entry privilege level allows us read access
// from this CPL.
if (tlbEntry->accessBits & (1<<pl)) { // Read this pl OK.
if (tlbEntry->accessBits & (1<<CPL)) { // Read this pl OK.
bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
Bit32u pageOffset = laddr & 0xfff;
BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 1, BX_READ);
@ -788,7 +778,7 @@ accessOK:
exception(int_number(seg), 0, 0);
}
#endif
access_linear(laddr, 1, pl, BX_READ, (void *) data);
access_linear(laddr, 1, CPL, BX_READ, (void *) data);
return;
}
@ -807,11 +797,9 @@ BX_CPU_C::read_virtual_word(unsigned s, bx_address offset, Bit16u *data)
bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];
if ((seg->cache.valid & SegAccessROK4G) == SegAccessROK4G) {
unsigned pl;
accessOK:
laddr = BX_CPU_THIS_PTR get_segment_base(s) + offset;
BX_INSTR_MEM_DATA(BX_CPU_ID, laddr, 2, BX_READ);
pl = (CPL==3);
#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
if (BX_CPU_THIS_PTR alignment_check) {
if (laddr & 1) {
@ -829,7 +817,7 @@ accessOK:
if (tlbEntry->lpf == lpf) {
// See if the TLB entry privilege level allows us read access
// from this CPL.
if (tlbEntry->accessBits & (1<<pl)) { // Read this pl OK.
if (tlbEntry->accessBits & (1<<CPL)) { // Read this pl OK.
bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 2, BX_READ);
Bit16u *hostAddr = (Bit16u*) (hostPageAddr | pageOffset);
@ -845,7 +833,7 @@ accessOK:
exception(int_number(seg), 0, 0);
}
#endif
access_linear(laddr, 2, pl, BX_READ, (void *) data);
access_linear(laddr, 2, CPL, BX_READ, (void *) data);
return;
}
@ -864,11 +852,9 @@ BX_CPU_C::read_virtual_dword(unsigned s, bx_address offset, Bit32u *data)
bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];
if ((seg->cache.valid & SegAccessROK4G) == SegAccessROK4G) {
unsigned pl;
accessOK:
laddr = BX_CPU_THIS_PTR get_segment_base(s) + offset;
BX_INSTR_MEM_DATA(BX_CPU_ID, laddr, 4, BX_READ);
pl = (CPL==3);
#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
if (BX_CPU_THIS_PTR alignment_check) {
if (laddr & 3) {
@ -886,7 +872,7 @@ accessOK:
if (tlbEntry->lpf == lpf) {
// See if the TLB entry privilege level allows us read access
// from this CPL.
if (tlbEntry->accessBits & (1<<pl)) { // Read this pl OK.
if (tlbEntry->accessBits & (1<<CPL)) { // Read this pl OK.
bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 4, BX_READ);
Bit32u *hostAddr = (Bit32u*) (hostPageAddr | pageOffset);
@ -902,7 +888,7 @@ accessOK:
exception(int_number(seg), 0, 0);
}
#endif
access_linear(laddr, 4, pl, BX_READ, (void *) data);
access_linear(laddr, 4, CPL, BX_READ, (void *) data);
return;
}
@ -921,11 +907,9 @@ BX_CPU_C::read_virtual_qword(unsigned s, bx_address offset, Bit64u *data)
bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];
if ((seg->cache.valid & SegAccessROK4G) == SegAccessROK4G) {
unsigned pl;
accessOK:
laddr = BX_CPU_THIS_PTR get_segment_base(s) + offset;
BX_INSTR_MEM_DATA(BX_CPU_ID, laddr, 8, BX_READ);
pl = (CPL==3);
#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
if (BX_CPU_THIS_PTR alignment_check) {
if (laddr & 7) {
@ -943,7 +927,7 @@ accessOK:
if (tlbEntry->lpf == lpf) {
// See if the TLB entry privilege level allows us read access
// from this CPL.
if (tlbEntry->accessBits & (1<<pl)) { // Read this pl OK.
if (tlbEntry->accessBits & (1<<CPL)) { // Read this pl OK.
bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 8, BX_READ);
Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
@ -959,7 +943,7 @@ accessOK:
exception(int_number(seg), 0, 0);
}
#endif
access_linear(laddr, 8, pl, BX_READ, (void *) data);
access_linear(laddr, 8, CPL, BX_READ, (void *) data);
return;
}
@ -983,11 +967,9 @@ BX_CPU_C::read_RMW_virtual_byte(unsigned s, bx_address offset, Bit8u *data)
bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];
if ((seg->cache.valid & SegAccessWOK4G) == SegAccessWOK4G) {
unsigned pl;
accessOK:
laddr = BX_CPU_THIS_PTR get_segment_base(s) + offset;
BX_INSTR_MEM_DATA(BX_CPU_ID, laddr, 1, BX_RW);
pl = (CPL==3);
#if BX_SupportGuest2HostTLB
Bit32u tlbIndex = BX_TLB_INDEX_OF(laddr);
bx_address lpf = LPFOf(laddr);
@ -995,7 +977,7 @@ accessOK:
if (tlbEntry->lpf == lpf) {
// See if the TLB entry privilege level allows us write access
// from this CPL.
if (tlbEntry->accessBits & (0x04 << pl)) {
if (tlbEntry->accessBits & (0x10 << CPL)) {
bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
Bit32u pageOffset = laddr & 0xfff;
BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 1, BX_RW);
@ -1017,7 +999,7 @@ accessOK:
exception(int_number(seg), 0, 0);
}
#endif
access_linear(laddr, 1, pl, BX_RW, (void *) data);
access_linear(laddr, 1, CPL, BX_RW, (void *) data);
return;
}
@ -1036,11 +1018,9 @@ BX_CPU_C::read_RMW_virtual_word(unsigned s, bx_address offset, Bit16u *data)
bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];
if ((seg->cache.valid & SegAccessWOK4G) == SegAccessWOK4G) {
unsigned pl;
accessOK:
laddr = BX_CPU_THIS_PTR get_segment_base(s) + offset;
BX_INSTR_MEM_DATA(BX_CPU_ID, laddr, 2, BX_RW);
pl = (CPL==3);
#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
if (BX_CPU_THIS_PTR alignment_check) {
if (laddr & 1) {
@ -1058,7 +1038,7 @@ accessOK:
if (tlbEntry->lpf == lpf) {
// See if the TLB entry privilege level allows us write access
// from this CPL.
if (tlbEntry->accessBits & (0x04 << pl)) {
if (tlbEntry->accessBits & (0x10 << CPL)) {
bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 2, BX_RW);
Bit16u *hostAddr = (Bit16u*) (hostPageAddr | pageOffset);
@ -1078,7 +1058,7 @@ accessOK:
exception(int_number(seg), 0, 0);
}
#endif
access_linear(laddr, 2, pl, BX_RW, (void *) data);
access_linear(laddr, 2, CPL, BX_RW, (void *) data);
return;
}
@ -1097,11 +1077,9 @@ BX_CPU_C::read_RMW_virtual_dword(unsigned s, bx_address offset, Bit32u *data)
bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];
if ((seg->cache.valid & SegAccessWOK4G) == SegAccessWOK4G) {
unsigned pl;
accessOK:
laddr = BX_CPU_THIS_PTR get_segment_base(s) + offset;
BX_INSTR_MEM_DATA(BX_CPU_ID, laddr, 4, BX_RW);
pl = (CPL==3);
#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
if (BX_CPU_THIS_PTR alignment_check) {
if (laddr & 3) {
@ -1119,7 +1097,7 @@ accessOK:
if (tlbEntry->lpf == lpf) {
// See if the TLB entry privilege level allows us write access
// from this CPL.
if (tlbEntry->accessBits & (0x04 << pl)) {
if (tlbEntry->accessBits & (0x10 << CPL)) {
bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 4, BX_RW);
Bit32u *hostAddr = (Bit32u*) (hostPageAddr | pageOffset);
@ -1139,7 +1117,7 @@ accessOK:
exception(int_number(seg), 0, 0);
}
#endif
access_linear(laddr, 4, pl, BX_RW, (void *) data);
access_linear(laddr, 4, CPL, BX_RW, (void *) data);
return;
}
@ -1158,11 +1136,9 @@ BX_CPU_C::read_RMW_virtual_qword(unsigned s, bx_address offset, Bit64u *data)
bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];
if ((seg->cache.valid & SegAccessWOK4G) == SegAccessWOK4G) {
unsigned pl;
accessOK:
laddr = BX_CPU_THIS_PTR get_segment_base(s) + offset;
BX_INSTR_MEM_DATA(BX_CPU_ID, laddr, 8, BX_RW);
pl = (CPL==3);
#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
if (BX_CPU_THIS_PTR alignment_check) {
if (laddr & 7) {
@ -1180,7 +1156,7 @@ accessOK:
if (tlbEntry->lpf == lpf) {
// See if the TLB entry privilege level allows us write access
// from this CPL.
if (tlbEntry->accessBits & (0x04 << pl)) {
if (tlbEntry->accessBits & (0x10 << CPL)) {
bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 8, BX_RW);
Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
@ -1200,7 +1176,7 @@ accessOK:
exception(int_number(seg), 0, 0);
}
#endif
access_linear(laddr, 8, pl, BX_RW, (void *) data);
access_linear(laddr, 8, CPL, BX_RW, (void *) data);
return;
}
@ -1423,7 +1399,7 @@ BX_CPU_C::write_virtual_tword(unsigned s, bx_address offset, floatx80 *data)
//
// assuming the write happens in legacy mode
void BX_CPU_C::write_new_stack_word(bx_segment_reg_t *seg, bx_address offset, bx_bool user, Bit16u data)
void BX_CPU_C::write_new_stack_word(bx_segment_reg_t *seg, bx_address offset, unsigned curr_pl, Bit16u data)
{
bx_address laddr;
@ -1442,14 +1418,14 @@ accessOK:
}
#endif
#if BX_SupportGuest2HostTLB
Bit16u *hostAddr = v2h_write_word(laddr, user);
Bit16u *hostAddr = v2h_write_word(laddr, curr_pl);
if (hostAddr) {
BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | (laddr & 0xfff), 2, BX_WRITE);
WriteHostWordToLittleEndian(hostAddr, data);
return;
}
#endif
access_linear(laddr, 2, user, BX_WRITE, (void *) &data);
access_linear(laddr, 2, curr_pl, BX_WRITE, (void *) &data);
return;
}
@ -1462,7 +1438,7 @@ accessOK:
}
// assuming the write happens in legacy mode
void BX_CPU_C::write_new_stack_dword(bx_segment_reg_t *seg, bx_address offset, bx_bool user, Bit32u data)
void BX_CPU_C::write_new_stack_dword(bx_segment_reg_t *seg, bx_address offset, unsigned curr_pl, Bit32u data)
{
bx_address laddr;
@ -1481,14 +1457,14 @@ accessOK:
}
#endif
#if BX_SupportGuest2HostTLB
Bit32u *hostAddr = v2h_write_dword(laddr, user);
Bit32u *hostAddr = v2h_write_dword(laddr, curr_pl);
if (hostAddr) {
BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | (laddr & 0xfff), 4, BX_WRITE);
WriteHostDWordToLittleEndian(hostAddr, data);
return;
}
#endif
access_linear(laddr, 4, user, BX_WRITE, (void *) &data);
access_linear(laddr, 4, curr_pl, BX_WRITE, (void *) &data);
return;
}
@ -1502,7 +1478,7 @@ accessOK:
// assuming the write happens in 64-bit mode
#if BX_SUPPORT_X86_64
void BX_CPU_C::write_new_stack_qword(bx_address offset, bx_bool user, Bit64u data)
void BX_CPU_C::write_new_stack_qword(bx_address offset, unsigned curr_pl, Bit64u data)
{
bx_address laddr = offset;
@ -1517,14 +1493,14 @@ void BX_CPU_C::write_new_stack_qword(bx_address offset, bx_bool user, Bit64u dat
}
#endif
#if BX_SupportGuest2HostTLB
Bit64u *hostAddr = v2h_write_qword(laddr, user);
Bit64u *hostAddr = v2h_write_qword(laddr, curr_pl);
if (hostAddr) {
BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | (laddr & 0xfff), 8, BX_WRITE);
WriteHostQWordToLittleEndian(hostAddr, data);
return;
}
#endif
access_linear(laddr, 8, user, BX_WRITE, (void *) &data);
access_linear(laddr, 8, curr_pl, BX_WRITE, (void *) &data);
}
else {
BX_ERROR(("write_new_stack_qword(): canonical failure 0x%08x:%08x", GET32H(laddr), GET32L(laddr)));

call_far.cc

@ -1,5 +1,5 @@
////////////////////////////////////////////////////////////////////////
// $Id: call_far.cc,v 1.22 2007-11-17 23:28:30 sshwarts Exp $
// $Id: call_far.cc,v 1.23 2007-12-16 21:03:45 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2005 Stanislav Shwartsman
@ -362,7 +362,6 @@ BX_CPU_C::call_protected(bxInstruction_c *i, Bit16u cs_raw, bx_address disp)
// add cpl to the selector value
new_stack.selector.value = (0xfffc & new_stack.selector.value) |
new_stack.selector.rpl;
bx_bool user = (cs_descriptor.dpl == 3);
if (ss_descriptor.u.segment.d_b)
temp_ESP = ESP_for_cpl_x;
@ -371,31 +370,31 @@ BX_CPU_C::call_protected(bxInstruction_c *i, Bit16u cs_raw, bx_address disp)
// push pointer of old stack onto new stack
if (gate_descriptor.type==BX_386_CALL_GATE) {
write_new_stack_dword(&new_stack, temp_ESP-4, user, return_SS);
write_new_stack_dword(&new_stack, temp_ESP-8, user, return_ESP);
write_new_stack_dword(&new_stack, temp_ESP-4, cs_descriptor.dpl, return_SS);
write_new_stack_dword(&new_stack, temp_ESP-8, cs_descriptor.dpl, return_ESP);
temp_ESP -= 8;
for (unsigned i=param_count; i>0; i--) {
temp_ESP -= 4;
write_new_stack_dword(&new_stack, temp_ESP, user, parameter_dword[i-1]);
write_new_stack_dword(&new_stack, temp_ESP, cs_descriptor.dpl, parameter_dword[i-1]);
}
// push return address onto new stack
write_new_stack_dword(&new_stack, temp_ESP-4, user, return_CS);
write_new_stack_dword(&new_stack, temp_ESP-8, user, return_EIP);
write_new_stack_dword(&new_stack, temp_ESP-4, cs_descriptor.dpl, return_CS);
write_new_stack_dword(&new_stack, temp_ESP-8, cs_descriptor.dpl, return_EIP);
temp_ESP -= 8;
}
else {
write_new_stack_word(&new_stack, temp_ESP-2, user, return_SS);
write_new_stack_word(&new_stack, temp_ESP-4, user, (Bit16u) return_ESP);
write_new_stack_word(&new_stack, temp_ESP-2, cs_descriptor.dpl, return_SS);
write_new_stack_word(&new_stack, temp_ESP-4, cs_descriptor.dpl, (Bit16u) return_ESP);
temp_ESP -= 4;
for (unsigned i=param_count; i>0; i--) {
temp_ESP -= 2;
write_new_stack_word(&new_stack, temp_ESP, user, parameter_word[i-1]);
write_new_stack_word(&new_stack, temp_ESP, cs_descriptor.dpl, parameter_word[i-1]);
}
// push return address onto new stack
write_new_stack_word(&new_stack, temp_ESP-2, user, return_CS);
write_new_stack_word(&new_stack, temp_ESP-4, user, (Bit16u) return_EIP);
write_new_stack_word(&new_stack, temp_ESP-2, cs_descriptor.dpl, return_CS);
write_new_stack_word(&new_stack, temp_ESP-4, cs_descriptor.dpl, (Bit16u) return_EIP);
temp_ESP -= 4;
}
@ -523,14 +522,13 @@ BX_CPU_C::call_gate64(bx_selector_t *gate_selector)
Bit64u old_SS = BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].selector.value;
Bit64u old_RSP = RSP;
bx_bool user = (cs_descriptor.dpl == 3);
// push old stack long pointer onto new stack
write_new_stack_qword(RSP_for_cpl_x - 8, user, old_SS);
write_new_stack_qword(RSP_for_cpl_x - 16, user, old_RSP);
write_new_stack_qword(RSP_for_cpl_x - 8, cs_descriptor.dpl, old_SS);
write_new_stack_qword(RSP_for_cpl_x - 16, cs_descriptor.dpl, old_RSP);
// push long pointer to return address onto new stack
write_new_stack_qword(RSP_for_cpl_x - 24, user, old_CS);
write_new_stack_qword(RSP_for_cpl_x - 32, user, old_RIP);
write_new_stack_qword(RSP_for_cpl_x - 24, cs_descriptor.dpl, old_CS);
write_new_stack_qword(RSP_for_cpl_x - 32, cs_descriptor.dpl, old_RIP);
RSP_for_cpl_x -= 32;
// prepare new stack null SS selector

cpu.cc

@ -1,5 +1,5 @@
/////////////////////////////////////////////////////////////////////////
// $Id: cpu.cc,v 1.190 2007-12-14 20:41:09 sshwarts Exp $
// $Id: cpu.cc,v 1.191 2007-12-16 21:03:45 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
// Copyright (C) 2001 MandrakeSoft S.A.
@ -860,7 +860,7 @@ void BX_CPU_C::prefetch(void)
if (BX_CPU_THIS_PTR cr0.get_PG()) {
// aligned block guaranteed to be all in one page, same A20 address
pAddr = itranslate_linear(laddr, CPL==3);
pAddr = itranslate_linear(laddr, CPL);
pAddr = A20ADDR(pAddr);
}
else

cpu.h

@ -1,5 +1,5 @@
/////////////////////////////////////////////////////////////////////////
// $Id: cpu.h,v 1.393 2007-12-16 20:47:09 sshwarts Exp $
// $Id: cpu.h,v 1.394 2007-12-16 21:03:45 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
// Copyright (C) 2001 MandrakeSoft S.A.
@ -3103,12 +3103,12 @@ public: // for now...
BX_SMF void read_virtual_tword(unsigned seg, bx_address offset, floatx80 *data) BX_CPP_AttrRegparmN(3);
#endif
// write of word/dword to new stack could happen only in legacy mode
BX_SMF void write_new_stack_word(bx_segment_reg_t *seg, bx_address offset, bx_bool user, Bit16u data);
BX_SMF void write_new_stack_dword(bx_segment_reg_t *seg, bx_address offset, bx_bool user, Bit32u data);
BX_SMF void write_new_stack_word(bx_segment_reg_t *seg, bx_address offset, unsigned curr_pl, Bit16u data);
BX_SMF void write_new_stack_dword(bx_segment_reg_t *seg, bx_address offset, unsigned curr_pl, Bit32u data);
#if BX_SUPPORT_X86_64
// write of qword to new stack could happen only in 64-bit mode
// (so stack segment is not relavant)
BX_SMF void write_new_stack_qword(bx_address offset, bx_bool user, Bit64u data);
BX_SMF void write_new_stack_qword(bx_address offset, unsigned curr_pl, Bit64u data);
#endif
#if BX_SUPPORT_MISALIGNED_SSE
@ -3136,14 +3136,14 @@ public: // for now...
BX_SMF void write_RMW_virtual_qword(Bit64u val64) BX_CPP_AttrRegparmN(1);
#if BX_SupportGuest2HostTLB
BX_SMF Bit8u* v2h_read_byte(bx_address laddr, unsigned pl) BX_CPP_AttrRegparmN(2);
BX_SMF Bit16u* v2h_read_word(bx_address laddr, unsigned pl) BX_CPP_AttrRegparmN(2);
BX_SMF Bit32u* v2h_read_dword(bx_address laddr, unsigned pl) BX_CPP_AttrRegparmN(2);
BX_SMF Bit64u* v2h_read_qword(bx_address laddr, unsigned pl) BX_CPP_AttrRegparmN(2);
BX_SMF Bit8u* v2h_write_byte(bx_address laddr, unsigned pl) BX_CPP_AttrRegparmN(2);
BX_SMF Bit16u* v2h_write_word(bx_address laddr, unsigned pl) BX_CPP_AttrRegparmN(2);
BX_SMF Bit32u* v2h_write_dword(bx_address laddr, unsigned pl) BX_CPP_AttrRegparmN(2);
BX_SMF Bit64u* v2h_write_qword(bx_address laddr, unsigned pl) BX_CPP_AttrRegparmN(2);
BX_SMF Bit8u* v2h_read_byte(bx_address laddr, unsigned curr_pl) BX_CPP_AttrRegparmN(2);
BX_SMF Bit16u* v2h_read_word(bx_address laddr, unsigned curr_pl) BX_CPP_AttrRegparmN(2);
BX_SMF Bit32u* v2h_read_dword(bx_address laddr, unsigned curr_pl) BX_CPP_AttrRegparmN(2);
BX_SMF Bit64u* v2h_read_qword(bx_address laddr, unsigned curr_pl) BX_CPP_AttrRegparmN(2);
BX_SMF Bit8u* v2h_write_byte(bx_address laddr, unsigned curr_pl) BX_CPP_AttrRegparmN(2);
BX_SMF Bit16u* v2h_write_word(bx_address laddr, unsigned curr_pl) BX_CPP_AttrRegparmN(2);
BX_SMF Bit32u* v2h_write_dword(bx_address laddr, unsigned curr_pl) BX_CPP_AttrRegparmN(2);
BX_SMF Bit64u* v2h_write_qword(bx_address laddr, unsigned curr_pl) BX_CPP_AttrRegparmN(2);
#endif
BX_SMF void branch_near32(Bit32u new_EIP) BX_CPP_AttrRegparmN(1);
@ -3180,19 +3180,21 @@ public: // for now...
BX_SMF void repeat_ZFL(bxInstruction_c *, BxExecutePtr_t execute);
// linear address for access_linear expected to be canonical !
BX_SMF void access_linear(bx_address address, unsigned length, unsigned pl,
BX_SMF void access_linear(bx_address address, unsigned length, unsigned curr_pl,
unsigned rw, void *data);
BX_SMF void page_fault(unsigned fault, bx_address laddr, unsigned pl, unsigned rw, unsigned access_type);
BX_SMF void page_fault(unsigned fault, bx_address laddr, unsigned user, unsigned rw, unsigned access_type);
// linear address for translate_linear expected to be canonical !
BX_SMF bx_phy_address translate_linear(bx_address laddr, unsigned pl, unsigned rw, unsigned access_type);
BX_SMF BX_CPP_INLINE bx_phy_address itranslate_linear(bx_address laddr, unsigned pl)
BX_SMF bx_phy_address translate_linear(bx_address laddr, unsigned curr_pl, unsigned rw, unsigned access_type);
BX_SMF BX_CPP_INLINE bx_phy_address itranslate_linear(bx_address laddr, unsigned curr_pl)
{
return translate_linear(laddr, pl, BX_READ, CODE_ACCESS);
return translate_linear(laddr, curr_pl, BX_READ, CODE_ACCESS);
}
BX_SMF BX_CPP_INLINE bx_phy_address dtranslate_linear(bx_address laddr, unsigned pl, unsigned rw)
BX_SMF BX_CPP_INLINE bx_phy_address dtranslate_linear(bx_address laddr, unsigned curr_pl, unsigned rw)
{
return translate_linear(laddr, pl, rw, DATA_ACCESS);
return translate_linear(laddr, curr_pl, rw, DATA_ACCESS);
}
BX_SMF void TLB_flush(bx_bool invalidateGlobal);
BX_SMF void TLB_invlpg(bx_address laddr);
BX_SMF void TLB_init(void);

exception.cc

@ -1,5 +1,5 @@
/////////////////////////////////////////////////////////////////////////
// $Id: exception.cc,v 1.97 2007-11-30 08:49:12 sshwarts Exp $
// $Id: exception.cc,v 1.98 2007-12-16 21:03:45 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
// Copyright (C) 2001 MandrakeSoft S.A.
@ -189,20 +189,19 @@ void BX_CPU_C::long_mode_int(Bit8u vector, bx_bool is_INT, bx_bool is_error_code
Bit64u old_RIP = RIP;
Bit64u old_SS = BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].selector.value;
Bit64u old_RSP = RSP;
bx_bool user = (cs_descriptor.dpl == 3);
// push old stack long pointer onto new stack
write_new_stack_qword(RSP_for_cpl_x - 8, user, old_SS);
write_new_stack_qword(RSP_for_cpl_x - 16, user, old_RSP);
write_new_stack_qword(RSP_for_cpl_x - 24, user, read_eflags());
write_new_stack_qword(RSP_for_cpl_x - 8, cs_descriptor.dpl, old_SS);
write_new_stack_qword(RSP_for_cpl_x - 16, cs_descriptor.dpl, old_RSP);
write_new_stack_qword(RSP_for_cpl_x - 24, cs_descriptor.dpl, read_eflags());
// push long pointer to return address onto new stack
write_new_stack_qword(RSP_for_cpl_x - 32, user, old_CS);
write_new_stack_qword(RSP_for_cpl_x - 40, user, old_RIP);
write_new_stack_qword(RSP_for_cpl_x - 32, cs_descriptor.dpl, old_CS);
write_new_stack_qword(RSP_for_cpl_x - 40, cs_descriptor.dpl, old_RIP);
RSP_for_cpl_x -= 40;
if (is_error_code) {
RSP_for_cpl_x -= 8;
write_new_stack_qword(RSP_for_cpl_x, user, error_code);
write_new_stack_qword(RSP_for_cpl_x, cs_descriptor.dpl, error_code);
}
bx_selector_t ss_selector;
@ -552,7 +551,6 @@ void BX_CPU_C::protected_mode_int(Bit8u vector, bx_bool is_INT, bx_bool is_error
// add cpl to the selector value
new_stack.selector.value = (0xfffc & new_stack.selector.value) |
new_stack.selector.rpl;
bx_bool user = (cs_descriptor.dpl == 3);
if (ss_descriptor.u.segment.d_b)
temp_ESP = ESP_for_cpl_x;
@ -562,24 +560,24 @@ void BX_CPU_C::protected_mode_int(Bit8u vector, bx_bool is_INT, bx_bool is_error
if (is_v8086_mode)
{
if (gate_descriptor.type>=14) { // 386 int/trap gate
write_new_stack_dword(&new_stack, temp_ESP-4, user,
write_new_stack_dword(&new_stack, temp_ESP-4, cs_descriptor.dpl,
BX_CPU_THIS_PTR sregs[BX_SEG_REG_GS].selector.value);
write_new_stack_dword(&new_stack, temp_ESP-8, user,
write_new_stack_dword(&new_stack, temp_ESP-8, cs_descriptor.dpl,
BX_CPU_THIS_PTR sregs[BX_SEG_REG_FS].selector.value);
write_new_stack_dword(&new_stack, temp_ESP-12, user,
write_new_stack_dword(&new_stack, temp_ESP-12, cs_descriptor.dpl,
BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].selector.value);
write_new_stack_dword(&new_stack, temp_ESP-16, user,
write_new_stack_dword(&new_stack, temp_ESP-16, cs_descriptor.dpl,
BX_CPU_THIS_PTR sregs[BX_SEG_REG_ES].selector.value);
temp_ESP -= 16;
}
else {
write_new_stack_word(&new_stack, temp_ESP-2, user,
write_new_stack_word(&new_stack, temp_ESP-2, cs_descriptor.dpl,
BX_CPU_THIS_PTR sregs[BX_SEG_REG_GS].selector.value);
write_new_stack_word(&new_stack, temp_ESP-4, user,
write_new_stack_word(&new_stack, temp_ESP-4, cs_descriptor.dpl,
BX_CPU_THIS_PTR sregs[BX_SEG_REG_FS].selector.value);
write_new_stack_word(&new_stack, temp_ESP-6, user,
write_new_stack_word(&new_stack, temp_ESP-6, cs_descriptor.dpl,
BX_CPU_THIS_PTR sregs[BX_SEG_REG_DS].selector.value);
write_new_stack_word(&new_stack, temp_ESP-8, user,
write_new_stack_word(&new_stack, temp_ESP-8, cs_descriptor.dpl,
BX_CPU_THIS_PTR sregs[BX_SEG_REG_ES].selector.value);
temp_ESP -= 8;
}
@ -587,30 +585,30 @@ void BX_CPU_C::protected_mode_int(Bit8u vector, bx_bool is_INT, bx_bool is_error
if (gate_descriptor.type>=14) { // 386 int/trap gate
// push long pointer to old stack onto new stack
write_new_stack_dword(&new_stack, temp_ESP-4, user, old_SS);
write_new_stack_dword(&new_stack, temp_ESP-8, user, old_ESP);
write_new_stack_dword(&new_stack, temp_ESP-12, user, read_eflags());
write_new_stack_dword(&new_stack, temp_ESP-16, user, old_CS);
write_new_stack_dword(&new_stack, temp_ESP-20, user, old_EIP);
write_new_stack_dword(&new_stack, temp_ESP-4, cs_descriptor.dpl, old_SS);
write_new_stack_dword(&new_stack, temp_ESP-8, cs_descriptor.dpl, old_ESP);
write_new_stack_dword(&new_stack, temp_ESP-12, cs_descriptor.dpl, read_eflags());
write_new_stack_dword(&new_stack, temp_ESP-16, cs_descriptor.dpl, old_CS);
write_new_stack_dword(&new_stack, temp_ESP-20, cs_descriptor.dpl, old_EIP);
temp_ESP -= 20;
if (is_error_code) {
temp_ESP -= 4;
write_new_stack_dword(&new_stack, temp_ESP, user, error_code);
write_new_stack_dword(&new_stack, temp_ESP, cs_descriptor.dpl, error_code);
}
}
else { // 286 int/trap gate
// push long pointer to old stack onto new stack
write_new_stack_word(&new_stack, temp_ESP-2, user, old_SS);
write_new_stack_word(&new_stack, temp_ESP-4, user, (Bit16u) old_ESP);
write_new_stack_word(&new_stack, temp_ESP-6, user, read_flags());
write_new_stack_word(&new_stack, temp_ESP-8, user, old_CS);
write_new_stack_word(&new_stack, temp_ESP-10, user, (Bit16u) old_EIP);
write_new_stack_word(&new_stack, temp_ESP-2, cs_descriptor.dpl, old_SS);
write_new_stack_word(&new_stack, temp_ESP-4, cs_descriptor.dpl, (Bit16u) old_ESP);
write_new_stack_word(&new_stack, temp_ESP-6, cs_descriptor.dpl, read_flags());
write_new_stack_word(&new_stack, temp_ESP-8, cs_descriptor.dpl, old_CS);
write_new_stack_word(&new_stack, temp_ESP-10, cs_descriptor.dpl, (Bit16u) old_EIP);
temp_ESP -= 10;
if (is_error_code) {
temp_ESP -= 2;
write_new_stack_word(&new_stack, temp_ESP, user, error_code);
write_new_stack_word(&new_stack, temp_ESP, cs_descriptor.dpl, error_code);
}
}

io.cc

@ -1,5 +1,5 @@
/////////////////////////////////////////////////////////////////////////
// $Id: io.cc,v 1.42 2007-10-29 15:39:18 sshwarts Exp $
// $Id: io.cc,v 1.43 2007-12-16 21:03:45 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
// Copyright (C) 2001 MandrakeSoft S.A.
@ -65,12 +65,12 @@ Bit32u BX_CPU_C::FastRepINSW(bxInstruction_c *i, bx_address dstOff, Bit16u port,
if (laddrDst & 1) return 0;
#if BX_SupportGuest2HostTLB
hostAddrDst = v2h_write_byte(laddrDst, CPL==3);
hostAddrDst = v2h_write_byte(laddrDst, CPL);
#else
bx_phy_address paddrDst;
if (BX_CPU_THIS_PTR cr0.get_PG())
paddrDst = dtranslate_linear(laddrDst, CPL==3, BX_WRITE);
paddrDst = dtranslate_linear(laddrDst, CPL, BX_WRITE);
else
paddrDst = laddrDst;
@ -192,12 +192,12 @@ Bit32u BX_CPU_C::FastRepOUTSW(bxInstruction_c *i, unsigned srcSeg, bx_address sr
if (laddrSrc & 1) return 0;
#if BX_SupportGuest2HostTLB
hostAddrSrc = v2h_read_byte(laddrSrc, CPL==3);
hostAddrSrc = v2h_read_byte(laddrSrc, CPL);
#else
bx_phy_address paddrSrc;
if (BX_CPU_THIS_PTR cr0.get_PG())
paddrSrc = dtranslate_linear(laddrSrc, CPL==3, BX_READ);
paddrSrc = dtranslate_linear(laddrSrc, CPL, BX_READ);
else
paddrSrc = laddrSrc;

paging.cc

@ -1,5 +1,5 @@
/////////////////////////////////////////////////////////////////////////
// $Id: paging.cc,v 1.98 2007-12-13 21:30:04 sshwarts Exp $
// $Id: paging.cc,v 1.99 2007-12-16 21:03:46 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
// Copyright (C) 2001 MandrakeSoft S.A.
@ -311,23 +311,26 @@ static unsigned priv_check[BX_PRIV_CHECK_SIZE];
// Each entry in the TLB cache has 3 entries:
//
// lpf: Linear Page Frame (page aligned linear address of page)
// bits 32..12 Linear page frame.
// bits 11..0 Invalidate index.
// bits 32..12 Linear page frame.
// bits 11...0 Invalidate index.
//
// ppf: Physical Page Frame (page aligned phy address of page)
//
// hostPageAddr:
// Host Page Frame address used for direct access to
// the mem.vector[] space allocated for the guest physical
// memory. If this is zero, it means that a pointer
// to the host space could not be generated, likely because
// that page of memory is not standard memory (it might
// be memory mapped IO, ROM, etc).
//
// accessBits:
// bits 32..11: Host Page Frame address used for direct access to
// the mem.vector[] space allocated for the guest physical
// memory. If this is zero, it means that a pointer
// to the host space could not be generated, likely because
// that page of memory is not standard memory (it might
// be memory mapped IO, ROM, etc).
// bits 9..10: (currently unused)
//
// bit 8: Page is a global page.
// bit 31: Page is a global page.
//
//
// The following 4 bits are used for a very efficient permissions
// The following bits are used for a very efficient permissions
// check. The goal is to be able, using only the current privilege
// level and access type, to determine if the page tables allow the
// access to occur or at least should rewalk the page tables. On
@ -339,43 +342,51 @@ static unsigned priv_check[BX_PRIV_CHECK_SIZE];
// value, necessitating a TLB flush when CR0.WP changes.
//
// The test is:
// OK = 0x1 << ( (W<<1) | U ) [W:1=write, 0=read, U:1=CPL3,0=CPL0-2]
// OK = 0x1 << ( (W<<2) | CPL ) [W:1=write, 0=read]
//
// Thus for reads, it is:
// OK = 0x1 << ( U )
// OK = 0x01 << ( CPL )
// And for writes:
// OK = 0x4 << ( U )
// OK = 0x10 << ( CPL )
//
// bit 7: a Write from User privilege is OK
// bit 6: a Write from System privilege is OK
// bit 5: a Read from User privilege is OK
// bit 4: a Read from System privilege is OK
// bit 15: a Write from CPL=3 is OK
// bit 14: a Write from CPL=2 is OK
// bit 13: a Write from CPL=1 is OK
// bit 12: a Write from CPL=0 is OK
//
// And the lowest 4 bits are as above, except that they also indicate
// bit 11: a Read from CPL=3 is OK
// bit 10: a Read from CPL=2 is OK
// bit 9: a Read from CPL=1 is OK
// bit 8: a Read from CPL=0 is OK
//
// And the lowest bits are as above, except that they also indicate
// that hostPageAddr is valid, so we do not separately need to test
// that pointer against NULL. These have smaller constants for us
// to be able to use smaller encodings in the trace generators. Note
// that whenever bit n (n=0,1,2,3) is set, then also n+4 is set.
// that whenever bit n (n=0..7) is set, then also n+8 is set.
// (The opposite is of course not true)
//
// bit 3: a Write from User privilege is OK, hostPageAddr is valid
// bit 2: a Write from System privilege is OK, hostPageAddr is valid
// bit 1: a Read from User privilege is OK, hostPageAddr is valid
// bit 0: a Read from System privilege is OK, hostPageAddr is valid
// bit 7: a Write from CPL=3 is OK, hostPageAddr is valid
// bit 6: a Write from CPL=2 is OK, hostPageAddr is valid
// bit 5: a Write from CPL=1 is OK, hostPageAddr is valid
// bit 4: a Write from CPL=0 is OK, hostPageAddr is valid
//
// bit 3: a Read from CPL=3 is OK, hostPageAddr is valid
// bit 2: a Read from CPL=2 is OK, hostPageAddr is valid
// bit 1: a Read from CPL=1 is OK, hostPageAddr is valid
// bit 0: a Read from CPL=0 is OK, hostPageAddr is valid
//
#define TLB_GlobalPage 0x100
#define TLB_WriteUserOK 0x8000
#define TLB_WriteSysOK 0x7000
#define TLB_ReadUserOK 0x0800
#define TLB_ReadSysOK 0x0700
#define TLB_WriteUserPtrOK 0x0080
#define TLB_WriteSysPtrOK 0x0070
#define TLB_ReadUserPtrOK 0x0008
#define TLB_ReadSysPtrOK 0x0007
#define TLB_WriteUserOK 0x80
#define TLB_WriteSysOK 0x40
#define TLB_ReadUserOK 0x20
#define TLB_ReadSysOK 0x10
#define TLB_WriteUserPtrOK 0x08
#define TLB_WriteSysPtrOK 0x04
#define TLB_ReadUserPtrOK 0x02
#define TLB_ReadSysPtrOK 0x01
#define PAGE_DIRECTORY_NX_BIT (BX_CONST64(0x8000000000000000))
#define TLB_GlobalPage 0x80000000
// === TLB Instrumentation section ==============================
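As a rough worked example of the widened layout (a hypothetical sketch; the enum values mirror the defines above, the helper name tlb_ok is made up): a page that the tables allow to be read and written from every CPL gets the eight high OK bits, the hostPageAddr fold copies them into the eight low pointer-valid bits, and both the translate_linear check and the v2h_* fast paths become a single shift-and-mask:

#include <cassert>

// Bit layout after this commit: bits 8..15 = access allowed by the page tables
// (reads in bits 8..11, writes in bits 12..15, one bit per CPL); bits 0..7 repeat
// the same tests but additionally guarantee that hostPageAddr is valid.
enum {
  TLB_ReadSysOK  = 0x0700, TLB_ReadUserOK  = 0x0800,
  TLB_WriteSysOK = 0x7000, TLB_WriteUserOK = 0x8000
};

// Sketch of the permission test translate_linear performs on a TLB hit.
static bool tlb_ok(unsigned accessBits, bool isWrite, unsigned cpl)
{
  return (accessBits & (0x0100 << (((unsigned)isWrite << 2) | cpl))) != 0;
}

int main()
{
  unsigned accessBits = TLB_ReadSysOK | TLB_ReadUserOK |
                        TLB_WriteSysOK | TLB_WriteUserOK;    // 0xff00
  accessBits |= (accessBits & 0xff00) >> 8;   // hostPageAddr valid: fold into bits 0..7

  assert(tlb_ok(accessBits, true, 3));        // ring-3 write permitted
  assert(accessBits & (0x10 << 3));           // v2h_write_* fast path permitted too
  assert(accessBits & (1 << 0));              // v2h_read_* from CPL 0 permitted
  return 0;
}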
@ -578,11 +589,11 @@ void BX_CPU_C::INVLPG(bxInstruction_c* i)
#define ERROR_RESERVED 0x08
#define ERROR_CODE_ACCESS 0x10
void BX_CPU_C::page_fault(unsigned fault, bx_address laddr, unsigned pl, unsigned rw, unsigned access_type)
void BX_CPU_C::page_fault(unsigned fault, bx_address laddr, unsigned user, unsigned rw, unsigned access_type)
{
unsigned error_code = fault;
error_code |= (pl << 2) | (rw << 1);
error_code |= (user << 2) | (rw << 1);
#if BX_SUPPORT_X86_64
if (BX_CPU_THIS_PTR efer.nxe && (access_type == CODE_ACCESS))
error_code |= ERROR_CODE_ACCESS; // I/D = 1
@ -604,8 +615,10 @@ void BX_CPU_C::page_fault(unsigned fault, bx_address laddr, unsigned pl, unsigne
#define PAGING_PML4_RESERVED_BITS 0x00000180 /* bits 7,8 */
#define PAGING_PDPE_RESERVED_BITS 0x00000180 /* bits 7,8 - we not support 1G paging */
#define PAGE_DIRECTORY_NX_BIT (BX_CONST64(0x8000000000000000))
// Translate a linear address to a physical address
bx_phy_address BX_CPU_C::translate_linear(bx_address laddr, unsigned pl, unsigned rw, unsigned access_type)
bx_phy_address BX_CPU_C::translate_linear(bx_address laddr, unsigned curr_pl, unsigned rw, unsigned access_type)
{
Bit32u accessBits, combined_access = 0;
unsigned priv_index;
@ -615,6 +628,8 @@ bx_phy_address BX_CPU_C::translate_linear(bx_address laddr, unsigned pl, unsigne
bx_phy_address paddress, ppf, poffset;
bx_bool isWrite = (rw >= BX_WRITE); // write or r-m-w
unsigned pl = (curr_pl == 3);
poffset = laddr & 0x00000fff; // physical offset
#if BX_USE_TLB
@ -630,7 +645,7 @@ bx_phy_address BX_CPU_C::translate_linear(bx_address laddr, unsigned pl, unsigne
paddress = tlbEntry->ppf | poffset;
accessBits = tlbEntry->accessBits;
if (accessBits & (0x10 << ((isWrite<<1) | pl)))
if (accessBits & (0x0100 << ((isWrite<<2) | curr_pl)))
return(paddress);
// The current access does not have permission according to the info
@ -1011,7 +1026,7 @@ bx_phy_address BX_CPU_C::translate_linear(bx_address laddr, unsigned pl, unsigne
if (BX_CPU_THIS_PTR TLB.entry[TLB_index].hostPageAddr) {
// All access allowed also via direct pointer
accessBits |= (accessBits & 0xf0) >> 4;
accessBits |= (accessBits & 0xff00) >> 8;
}
#endif
BX_CPU_THIS_PTR TLB.entry[TLB_index].accessBits = accessBits;
@ -1096,7 +1111,7 @@ page_fault:
}
#endif
void BX_CPU_C::access_linear(bx_address laddr, unsigned len, unsigned pl, unsigned rw, void *data)
void BX_CPU_C::access_linear(bx_address laddr, unsigned len, unsigned curr_pl, unsigned rw, void *data)
{
#if BX_X86_DEBUGGER
hwbreakpoint_match(laddr, len, rw);
@ -1111,7 +1126,7 @@ void BX_CPU_C::access_linear(bx_address laddr, unsigned len, unsigned pl, unsign
if ( (pageOffset + len) <= 4096 ) {
// Access within single page.
BX_CPU_THIS_PTR address_xlation.paddress1 =
dtranslate_linear(laddr, pl, xlate_rw);
dtranslate_linear(laddr, curr_pl, xlate_rw);
BX_CPU_THIS_PTR address_xlation.pages = 1;
if (rw == BX_READ) {
@ -1129,14 +1144,14 @@ void BX_CPU_C::access_linear(bx_address laddr, unsigned len, unsigned pl, unsign
else {
// access across 2 pages
BX_CPU_THIS_PTR address_xlation.paddress1 =
dtranslate_linear(laddr, pl, xlate_rw);
dtranslate_linear(laddr, curr_pl, xlate_rw);
BX_CPU_THIS_PTR address_xlation.len1 = 4096 - pageOffset;
BX_CPU_THIS_PTR address_xlation.len2 = len -
BX_CPU_THIS_PTR address_xlation.len1;
BX_CPU_THIS_PTR address_xlation.pages = 2;
BX_CPU_THIS_PTR address_xlation.paddress2 =
dtranslate_linear(laddr + BX_CPU_THIS_PTR address_xlation.len1,
pl, xlate_rw);
curr_pl, xlate_rw);
#ifdef BX_LITTLE_ENDIAN
if (rw == BX_READ) {

proc_ctrl.cc

@ -1,5 +1,5 @@
/////////////////////////////////////////////////////////////////////////
// $Id: proc_ctrl.cc,v 1.189 2007-12-14 20:41:09 sshwarts Exp $
// $Id: proc_ctrl.cc,v 1.190 2007-12-16 21:03:46 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
// Copyright (C) 2001 MandrakeSoft S.A.
@ -1943,7 +1943,7 @@ void BX_CPU_C::MONITOR(bxInstruction_c *i)
laddr = BX_CPU_THIS_PTR get_segment_base(i->seg()) + addr;
if (BX_CPU_THIS_PTR cr0.get_PG()) {
paddr = dtranslate_linear(laddr, CPL==3, BX_READ);
paddr = dtranslate_linear(laddr, CPL, BX_READ);
paddr = A20ADDR(paddr);
}
else

View File

@ -1,5 +1,5 @@
/////////////////////////////////////////////////////////////////////////
// $Id: string.cc,v 1.43 2007-11-20 17:15:33 sshwarts Exp $
// $Id: string.cc,v 1.44 2007-12-16 21:03:46 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
// Copyright (C) 2001 MandrakeSoft S.A.
@ -63,12 +63,12 @@ Bit32u BX_CPU_C::FastRepMOVSB(bxInstruction_c *i, unsigned srcSeg, bx_address sr
laddrSrc = BX_CPU_THIS_PTR get_segment_base(srcSeg) + srcOff;
#if BX_SupportGuest2HostTLB
hostAddrSrc = v2h_read_byte(laddrSrc, CPL==3);
hostAddrSrc = v2h_read_byte(laddrSrc, CPL);
#else
bx_phy_address paddrSrc;
if (BX_CPU_THIS_PTR cr0.get_PG()) {
paddrSrc = dtranslate_linear(laddrSrc, CPL==3, BX_READ);
paddrSrc = dtranslate_linear(laddrSrc, CPL, BX_READ);
}
else {
paddrSrc = laddrSrc;
@ -86,12 +86,12 @@ Bit32u BX_CPU_C::FastRepMOVSB(bxInstruction_c *i, unsigned srcSeg, bx_address sr
laddrDst = BX_CPU_THIS_PTR get_segment_base(dstSeg) + dstOff;
#if BX_SupportGuest2HostTLB
hostAddrDst = v2h_write_byte(laddrDst, CPL==3);
hostAddrDst = v2h_write_byte(laddrDst, CPL);
#else
bx_phy_address paddrDst;
if (BX_CPU_THIS_PTR cr0.get_PG()) {
paddrDst = dtranslate_linear(laddrDst, CPL==3, BX_WRITE);
paddrDst = dtranslate_linear(laddrDst, CPL, BX_WRITE);
}
else {
paddrDst = laddrDst;
@ -209,12 +209,12 @@ Bit32u BX_CPU_C::FastRepMOVSW(bxInstruction_c *i, unsigned srcSeg, bx_address sr
laddrSrc = BX_CPU_THIS_PTR get_segment_base(srcSeg) + srcOff;
#if BX_SupportGuest2HostTLB
hostAddrSrc = v2h_read_byte(laddrSrc, CPL==3);
hostAddrSrc = v2h_read_byte(laddrSrc, CPL);
#else
bx_phy_address paddrSrc;
if (BX_CPU_THIS_PTR cr0.get_PG()) {
paddrSrc = dtranslate_linear(laddrSrc, CPL==3, BX_READ);
paddrSrc = dtranslate_linear(laddrSrc, CPL, BX_READ);
}
else {
paddrSrc = laddrSrc;
@ -232,12 +232,12 @@ Bit32u BX_CPU_C::FastRepMOVSW(bxInstruction_c *i, unsigned srcSeg, bx_address sr
laddrDst = BX_CPU_THIS_PTR get_segment_base(dstSeg) + dstOff;
#if BX_SupportGuest2HostTLB
hostAddrDst = v2h_write_byte(laddrDst, CPL==3);
hostAddrDst = v2h_write_byte(laddrDst, CPL);
#else
bx_phy_address paddrDst;
if (BX_CPU_THIS_PTR cr0.get_PG()) {
paddrDst = dtranslate_linear(laddrDst, CPL==3, BX_WRITE);
paddrDst = dtranslate_linear(laddrDst, CPL, BX_WRITE);
}
else {
paddrDst = laddrDst;
@ -359,12 +359,12 @@ Bit32u BX_CPU_C::FastRepMOVSD(bxInstruction_c *i, unsigned srcSeg, bx_address sr
laddrSrc = BX_CPU_THIS_PTR get_segment_base(srcSeg) + srcOff;
#if BX_SupportGuest2HostTLB
hostAddrSrc = v2h_read_byte(laddrSrc, CPL==3);
hostAddrSrc = v2h_read_byte(laddrSrc, CPL);
#else
bx_phy_address paddrSrc;
if (BX_CPU_THIS_PTR cr0.get_PG()) {
paddrSrc = dtranslate_linear(laddrSrc, CPL==3, BX_READ);
paddrSrc = dtranslate_linear(laddrSrc, CPL, BX_READ);
}
else {
paddrSrc = laddrSrc;
@ -382,12 +382,12 @@ Bit32u BX_CPU_C::FastRepMOVSD(bxInstruction_c *i, unsigned srcSeg, bx_address sr
laddrDst = BX_CPU_THIS_PTR get_segment_base(dstSeg) + dstOff;
#if BX_SupportGuest2HostTLB
hostAddrDst = v2h_write_byte(laddrDst, CPL==3);
hostAddrDst = v2h_write_byte(laddrDst, CPL);
#else
bx_phy_address paddrDst;
if (BX_CPU_THIS_PTR cr0.get_PG()) {
paddrDst = dtranslate_linear(laddrDst, CPL==3, BX_WRITE);
paddrDst = dtranslate_linear(laddrDst, CPL, BX_WRITE);
}
else {
paddrDst = laddrDst;
@ -503,12 +503,12 @@ Bit32u BX_CPU_C::FastRepSTOSB(bxInstruction_c *i, unsigned dstSeg, bx_address ds
laddrDst = BX_CPU_THIS_PTR get_segment_base(dstSeg) + dstOff;
#if BX_SupportGuest2HostTLB
hostAddrDst = v2h_write_byte(laddrDst, CPL==3);
hostAddrDst = v2h_write_byte(laddrDst, CPL);
#else
bx_phy_address paddrDst;
if (BX_CPU_THIS_PTR cr0.get_PG()) {
paddrDst = dtranslate_linear(laddrDst, CPL==3, BX_WRITE);
paddrDst = dtranslate_linear(laddrDst, CPL, BX_WRITE);
}
else {
paddrDst = laddrDst;
@ -603,12 +603,12 @@ Bit32u BX_CPU_C::FastRepSTOSW(bxInstruction_c *i, unsigned dstSeg, bx_address ds
laddrDst = BX_CPU_THIS_PTR get_segment_base(dstSeg) + dstOff;
#if BX_SupportGuest2HostTLB
hostAddrDst = v2h_write_byte(laddrDst, CPL==3);
hostAddrDst = v2h_write_byte(laddrDst, CPL);
#else
bx_phy_address paddrDst;
if (BX_CPU_THIS_PTR cr0.get_PG()) {
paddrDst = dtranslate_linear(laddrDst, CPL==3, BX_WRITE);
paddrDst = dtranslate_linear(laddrDst, CPL, BX_WRITE);
}
else {
paddrDst = laddrDst;
@ -706,12 +706,12 @@ Bit32u BX_CPU_C::FastRepSTOSD(bxInstruction_c *i, unsigned dstSeg, bx_address ds
laddrDst = BX_CPU_THIS_PTR get_segment_base(dstSeg) + dstOff;
#if BX_SupportGuest2HostTLB
hostAddrDst = v2h_write_byte(laddrDst, CPL==3);
hostAddrDst = v2h_write_byte(laddrDst, CPL);
#else
bx_phy_address paddrDst;
if (BX_CPU_THIS_PTR cr0.get_PG()) {
paddrDst = dtranslate_linear(laddrDst, CPL==3, BX_WRITE);
paddrDst = dtranslate_linear(laddrDst, CPL, BX_WRITE);
}
else {
paddrDst = laddrDst;