For now: disable fetching from physical address 0xFFFFFFF0 after #RESET, because the iCache does not yet support physical addresses above mem.len. This is the first part of the fix; the rest will follow soon.
This commit is contained in:
parent eebdc22a1c
commit 645e04860e
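The failure mode the message refers to can be seen directly in this diff: init.cc sizes the iCache's per-page write-stamp table with iCache.alloc(mem->len), and prefetch() in cpu.cc indexes that table with pAddr >> 12. A physical fetch address above mem.len, such as the reset vector 0xFFFFFFF0, therefore indexes far past the end of the table. A minimal standalone sketch of the arithmetic (the 32 MB guest size is only an assumed example, not from the commit):

// Illustration only -- mirrors the indexing in icache.h/cpu.cc, not Bochs code.
#include <cstdio>

int main()
{
  unsigned memSizeInBytes = 32 * 1024 * 1024;     // assumed guest RAM size (mem.len)
  unsigned tableEntries   = memSizeInBytes >> 12; // one write stamp per 4 KB page
  unsigned pAddr          = 0xFFFFFFF0u;          // physical reset vector

  unsigned phyPageIndex = pAddr >> 12;            // same shift prefetch() uses
  printf("pageWriteStampTable has %u entries; the reset fetch would index %u\n",
         tableEntries, phyPageIndex);             // far past the end of the table
  return 0;
}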
@@ -1,5 +1,5 @@
/////////////////////////////////////////////////////////////////////////
// $Id: access.cc,v 1.47 2004-11-05 10:13:14 sshwarts Exp $
// $Id: access.cc,v 1.48 2004-11-18 23:16:35 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
// Copyright (C) 2001 MandrakeSoft S.A.
@@ -298,7 +298,6 @@ accessOK:
pl = (CPL==3);

#if BX_SupportGuest2HostTLB
{
Bit32u tlbIndex = BX_TLB_INDEX_OF(laddr);
bx_address lpf = LPFOf(laddr);
if ((BX_CPU_THIS_PTR TLB.entry[tlbIndex].lpf == BX_TLB_LPF_VALUE(lpf)))
@@ -312,23 +311,13 @@ accessOK:
Bit32u pageOffset = laddr & 0xfff;
Bit8u *hostAddr = (Bit8u*) (hostPageAddr | pageOffset);

#if BX_SUPPORT_ICACHE
Bit32u *pageStamp;
pageStamp = & BX_CPU_THIS_PTR iCache.pageWriteStampTable[
BX_CPU_THIS_PTR TLB.entry[tlbIndex].ppf>>12];
#endif
// Current write access has privilege.
if (hostPageAddr
if (hostPageAddr) {
*hostAddr = *data;
#if BX_SUPPORT_ICACHE
&& (*pageStamp & ICacheWriteStampMask)
BX_CPU_THIS_PTR iCache.decWriteStamp(BX_CPU_THIS_PTR TLB.entry[tlbIndex].ppf);
#endif
) {
*hostAddr = *data;
#if BX_SUPPORT_ICACHE
(*pageStamp)--;
#endif
return;
}
return;
}
}
}
@@ -336,8 +325,8 @@ accessOK:

access_linear(laddr, 1, pl, BX_WRITE, (void *) data);
return;
}
}
}
write_virtual_checks(seg, offset, 1);
goto accessOK;
}
@@ -359,7 +348,6 @@ accessOK:
pl = (CPL==3);

#if BX_SupportGuest2HostTLB
{
Bit32u pageOffset = laddr & 0xfff;
if (pageOffset <= 0xffe) { // Make sure access does not span 2 pages.
Bit32u tlbIndex = BX_TLB_INDEX_OF(laddr);
@@ -373,23 +361,13 @@ accessOK:
bx_hostpageaddr_t hostPageAddr;
hostPageAddr = BX_CPU_THIS_PTR TLB.entry[tlbIndex].hostPageAddr;
Bit16u *hostAddr = (Bit16u*) (hostPageAddr | pageOffset);

if (hostPageAddr) {
WriteHostWordToLittleEndian(hostAddr, *data);
#if BX_SUPPORT_ICACHE
Bit32u *pageStamp;
pageStamp = & BX_CPU_THIS_PTR iCache.pageWriteStampTable[
BX_CPU_THIS_PTR TLB.entry[tlbIndex].ppf>>12];
BX_CPU_THIS_PTR iCache.decWriteStamp(BX_CPU_THIS_PTR TLB.entry[tlbIndex].ppf);
#endif
// Current write access has privilege.
if (hostPageAddr
#if BX_SUPPORT_ICACHE
&& (*pageStamp & ICacheWriteStampMask)
#endif
) {
WriteHostWordToLittleEndian(hostAddr, *data);
#if BX_SUPPORT_ICACHE
(*pageStamp)--;
#endif
return;
}
return;
}
}
}
@@ -398,8 +376,8 @@ accessOK:

access_linear(laddr, 2, pl, BX_WRITE, (void *) data);
return;
}
}
}
write_virtual_checks(seg, offset, 2);
goto accessOK;
}
@@ -421,7 +399,6 @@ accessOK:
pl = (CPL==3);

#if BX_SupportGuest2HostTLB
{
Bit32u pageOffset = laddr & 0xfff;
if (pageOffset <= 0xffc) { // Make sure access does not span 2 pages.
Bit32u tlbIndex = BX_TLB_INDEX_OF(laddr);
@@ -436,23 +413,13 @@ accessOK:
hostPageAddr = BX_CPU_THIS_PTR TLB.entry[tlbIndex].hostPageAddr;
Bit32u *hostAddr = (Bit32u*) (hostPageAddr | pageOffset);

#if BX_SUPPORT_ICACHE
Bit32u *pageStamp;
pageStamp = & BX_CPU_THIS_PTR iCache.pageWriteStampTable[
BX_CPU_THIS_PTR TLB.entry[tlbIndex].ppf>>12];
#endif
// Current write access has privilege.
if (hostPageAddr
#if BX_SUPPORT_ICACHE
&& (*pageStamp & ICacheWriteStampMask)
#endif
) {
if (hostPageAddr) {
WriteHostDWordToLittleEndian(hostAddr, *data);
#if BX_SUPPORT_ICACHE
(*pageStamp)--;
BX_CPU_THIS_PTR iCache.decWriteStamp(BX_CPU_THIS_PTR TLB.entry[tlbIndex].ppf);
#endif
return;
}
}
}
}
@@ -461,12 +428,64 @@ accessOK:

access_linear(laddr, 4, pl, BX_WRITE, (void *) data);
return;
}
}
}
write_virtual_checks(seg, offset, 4);
goto accessOK;
}

void BX_CPP_AttrRegparmN(3)
BX_CPU_C::write_virtual_qword(unsigned s, bx_address offset, Bit64u *data)
{
bx_address laddr;
bx_segment_reg_t *seg;

seg = &BX_CPU_THIS_PTR sregs[s];
if (seg->cache.valid & SegAccessWOK) {
if ((IsLongMode() && IsCanonical(offset))
|| (offset <= (seg->cache.u.segment.limit_scaled-7))) {
unsigned pl;
accessOK:
laddr = seg->cache.u.segment.base + offset;
BX_INSTR_MEM_DATA(BX_CPU_ID, laddr, 8, BX_WRITE);
pl = (CPL==3);

#if BX_SupportGuest2HostTLB
Bit32u pageOffset = laddr & 0xfff;
if (pageOffset <= 0xff8) { // Make sure access does not span 2 pages.
Bit32u tlbIndex = BX_TLB_INDEX_OF(laddr);
bx_address lpf = LPFOf(laddr);
if ((BX_CPU_THIS_PTR TLB.entry[tlbIndex].lpf == BX_TLB_LPF_VALUE(lpf)))
{
// See if the TLB entry privilege level allows us write access
// from this CPL.
Bit32u accessBits = BX_CPU_THIS_PTR TLB.entry[tlbIndex].accessBits;
if (accessBits & (1 << (2 | pl))) {
bx_hostpageaddr_t hostPageAddr;
hostPageAddr = BX_CPU_THIS_PTR TLB.entry[tlbIndex].hostPageAddr;
Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);

// Current write access has privilege.
if (hostPageAddr) {
WriteHostQWordToLittleEndian(hostAddr, *data);
#if BX_SUPPORT_ICACHE
BX_CPU_THIS_PTR iCache.decWriteStamp(BX_CPU_THIS_PTR TLB.entry[tlbIndex].ppf);
#endif
return;
}
}
}
}
#endif // BX_SupportGuest2HostTLB

access_linear(laddr, 8, pl, BX_WRITE, (void *) data);
return;
}
}
write_virtual_checks(seg, offset, 8);
goto accessOK;
}

void BX_CPP_AttrRegparmN(3)
BX_CPU_C::read_virtual_byte(unsigned s, bx_address offset, Bit8u *data)
{
@@ -609,6 +628,54 @@ accessOK:
goto accessOK;
}

void BX_CPP_AttrRegparmN(3)
BX_CPU_C::read_virtual_qword(unsigned s, bx_address offset, Bit64u *data)
{
bx_address laddr;
bx_segment_reg_t *seg;

seg = &BX_CPU_THIS_PTR sregs[s];
if (seg->cache.valid & SegAccessROK) {
if ((IsLongMode() && IsCanonical(offset))
|| (offset <= (seg->cache.u.segment.limit_scaled-7))) {
unsigned pl;
accessOK:
laddr = seg->cache.u.segment.base + offset;
BX_INSTR_MEM_DATA(BX_CPU_ID, laddr, 8, BX_READ);
pl = (CPL==3);

#if BX_SupportGuest2HostTLB
{
Bit32u pageOffset = laddr & 0xfff;
if (pageOffset <= 0xff8) { // Make sure access does not span 2 pages.
Bit32u tlbIndex = BX_TLB_INDEX_OF(laddr);
bx_address lpf = LPFOf(laddr);
if (BX_CPU_THIS_PTR TLB.entry[tlbIndex].lpf == BX_TLB_LPF_VALUE(lpf)) {
// See if the TLB entry privilege level allows us read access
// from this CPL.
Bit32u accessBits = BX_CPU_THIS_PTR TLB.entry[tlbIndex].accessBits;
if (accessBits & (1<<pl)) { // Read this pl OK.
bx_hostpageaddr_t hostPageAddr;
hostPageAddr = BX_CPU_THIS_PTR TLB.entry[tlbIndex].hostPageAddr;
Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
if (hostPageAddr) {
ReadHostQWordFromLittleEndian(hostAddr, *data);
return;
}
}
}
}
}
#endif // BX_SupportGuest2HostTLB

access_linear(laddr, 8, pl, BX_READ, (void *) data);
return;
}
}
read_virtual_checks(seg, offset, 8);
goto accessOK;
}

//////////////////////////////////////////////////////////////
// special Read-Modify-Write operations //
// address translation info is kept across read/write calls //
@@ -631,7 +698,6 @@ accessOK:
pl = (CPL==3);

#if BX_SupportGuest2HostTLB
{
Bit32u tlbIndex = BX_TLB_INDEX_OF(laddr);
bx_address lpf = LPFOf(laddr);
if ((BX_CPU_THIS_PTR TLB.entry[tlbIndex].lpf == BX_TLB_LPF_VALUE(lpf)))
@@ -644,24 +710,15 @@ accessOK:
hostPageAddr = BX_CPU_THIS_PTR TLB.entry[tlbIndex].hostPageAddr;
Bit32u pageOffset = laddr & 0xfff;
Bit8u *hostAddr = (Bit8u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
Bit32u *pageStamp;
pageStamp = & BX_CPU_THIS_PTR iCache.pageWriteStampTable[
BX_CPU_THIS_PTR TLB.entry[tlbIndex].ppf>>12];
#endif

// Current write access has privilege.
if (hostPageAddr
#if BX_SUPPORT_ICACHE
&& (*pageStamp & ICacheWriteStampMask)
#endif
) {
if (hostPageAddr) {
*data = *hostAddr;
BX_CPU_THIS_PTR address_xlation.pages = (bx_ptr_equiv_t) hostAddr;
#if BX_SUPPORT_ICACHE
(*pageStamp)--;
BX_CPU_THIS_PTR iCache.decWriteStamp(BX_CPU_THIS_PTR TLB.entry[tlbIndex].ppf);
#endif
return;
}
}
}
}
@@ -671,13 +728,12 @@ accessOK:
// old fashioned way...
access_linear(laddr, 1, pl, BX_RW, (void *) data);
return;
}
}
}
write_virtual_checks(seg, offset, 1);
goto accessOK;
}


void BX_CPP_AttrRegparmN(3)
BX_CPU_C::read_RMW_virtual_word(unsigned s, bx_address offset, Bit16u *data)
{
@@ -695,7 +751,6 @@ accessOK:
pl = (CPL==3);

#if BX_SupportGuest2HostTLB
{
Bit32u pageOffset = laddr & 0xfff;
if (pageOffset <= 0xffe) { // Make sure access does not span 2 pages.
Bit32u tlbIndex = BX_TLB_INDEX_OF(laddr);
@@ -709,24 +764,15 @@ accessOK:
bx_hostpageaddr_t hostPageAddr;
hostPageAddr = BX_CPU_THIS_PTR TLB.entry[tlbIndex].hostPageAddr;
Bit16u *hostAddr = (Bit16u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
Bit32u *pageStamp;
pageStamp = & BX_CPU_THIS_PTR iCache.pageWriteStampTable[
BX_CPU_THIS_PTR TLB.entry[tlbIndex].ppf>>12];
#endif

// Current write access has privilege.
if (hostPageAddr
#if BX_SUPPORT_ICACHE
&& (*pageStamp & ICacheWriteStampMask)
#endif
) {
if (hostPageAddr) {
ReadHostWordFromLittleEndian(hostAddr, *data);
BX_CPU_THIS_PTR address_xlation.pages = (bx_ptr_equiv_t) hostAddr;
#if BX_SUPPORT_ICACHE
(*pageStamp)--;
BX_CPU_THIS_PTR iCache.decWriteStamp(BX_CPU_THIS_PTR TLB.entry[tlbIndex].ppf);
#endif
return;
}
}
}
}
@@ -735,8 +781,8 @@ accessOK:

access_linear(laddr, 2, pl, BX_RW, (void *) data);
return;
}
}
}
write_virtual_checks(seg, offset, 2);
goto accessOK;
}
@@ -758,7 +804,6 @@ accessOK:
pl = (CPL==3);

#if BX_SupportGuest2HostTLB
{
Bit32u pageOffset = laddr & 0xfff;
if (pageOffset <= 0xffc) { // Make sure access does not span 2 pages.
Bit32u tlbIndex = BX_TLB_INDEX_OF(laddr);
@@ -772,24 +817,15 @@ accessOK:
bx_hostpageaddr_t hostPageAddr;
hostPageAddr = BX_CPU_THIS_PTR TLB.entry[tlbIndex].hostPageAddr;
Bit32u *hostAddr = (Bit32u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
Bit32u *pageStamp;
pageStamp = & BX_CPU_THIS_PTR iCache.pageWriteStampTable[
BX_CPU_THIS_PTR TLB.entry[tlbIndex].ppf>>12];
#endif

// Current write access has privilege.
if (hostPageAddr
#if BX_SUPPORT_ICACHE
&& (*pageStamp & ICacheWriteStampMask)
#endif
) {
if (hostPageAddr) {
ReadHostDWordFromLittleEndian(hostAddr, *data);
BX_CPU_THIS_PTR address_xlation.pages = (bx_ptr_equiv_t) hostAddr;
#if BX_SUPPORT_ICACHE
(*pageStamp)--;
BX_CPU_THIS_PTR iCache.decWriteStamp(BX_CPU_THIS_PTR TLB.entry[tlbIndex].ppf);
#endif
return;
}
}
}
}
@@ -798,12 +834,65 @@ accessOK:

access_linear(laddr, 4, pl, BX_RW, (void *) data);
return;
}
}
}
write_virtual_checks(seg, offset, 4);
goto accessOK;
}

void BX_CPP_AttrRegparmN(3)
BX_CPU_C::read_RMW_virtual_qword(unsigned s, bx_address offset, Bit64u *data)
{
bx_address laddr;
bx_segment_reg_t *seg;

seg = &BX_CPU_THIS_PTR sregs[s];
if (seg->cache.valid & SegAccessWOK) {
if ((IsLongMode() && IsCanonical(offset))
|| (offset <= (seg->cache.u.segment.limit_scaled-7))) {
unsigned pl;
accessOK:
laddr = seg->cache.u.segment.base + offset;
BX_INSTR_MEM_DATA(BX_CPU_ID, laddr, 8, BX_RW);
pl = (CPL==3);

#if BX_SupportGuest2HostTLB
Bit32u pageOffset = laddr & 0xfff;
if (pageOffset <= 0xff8) { // Make sure access does not span 2 pages.
Bit32u tlbIndex = BX_TLB_INDEX_OF(laddr);
bx_address lpf = LPFOf(laddr);
if ((BX_CPU_THIS_PTR TLB.entry[tlbIndex].lpf == BX_TLB_LPF_VALUE(lpf)))
{
// See if the TLB entry privilege level allows us write access
// from this CPL.
Bit32u accessBits = BX_CPU_THIS_PTR TLB.entry[tlbIndex].accessBits;
if (accessBits & (1 << (2 | pl))) {
bx_hostpageaddr_t hostPageAddr;
hostPageAddr = BX_CPU_THIS_PTR TLB.entry[tlbIndex].hostPageAddr;
Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);

// Current write access has privilege.
if (hostPageAddr) {
ReadHostQWordFromLittleEndian(hostAddr, *data);
BX_CPU_THIS_PTR address_xlation.pages = (bx_ptr_equiv_t) hostAddr;
#if BX_SUPPORT_ICACHE
BX_CPU_THIS_PTR iCache.decWriteStamp(BX_CPU_THIS_PTR TLB.entry[tlbIndex].ppf);
#endif
return;
}
}
}
}
#endif // BX_SupportGuest2HostTLB

access_linear(laddr, 8, pl, BX_RW, (void *) data);
return;
}
}
write_virtual_checks(seg, offset, 8);
goto accessOK;
}

void BX_CPP_AttrRegparmN(1)
BX_CPU_C::write_RMW_virtual_byte(Bit8u val8)
{
@@ -881,116 +970,6 @@ BX_CPU_C::write_RMW_virtual_dword(Bit32u val32)
}
}

void BX_CPP_AttrRegparmN(3)
BX_CPU_C::write_virtual_qword(unsigned s, bx_address offset, Bit64u *data)
{
bx_address laddr;
bx_segment_reg_t *seg;

seg = &BX_CPU_THIS_PTR sregs[s];
if (seg->cache.valid & SegAccessWOK) {
if ((IsLongMode() && IsCanonical(offset))
|| (offset <= (seg->cache.u.segment.limit_scaled-7))) {
unsigned pl;
accessOK:
laddr = seg->cache.u.segment.base + offset;
BX_INSTR_MEM_DATA(BX_CPU_ID, laddr, 8, BX_WRITE);
pl = (CPL==3);

#if BX_SupportGuest2HostTLB
{
Bit32u pageOffset = laddr & 0xfff;
if (pageOffset <= 0xff8) { // Make sure access does not span 2 pages.
Bit32u tlbIndex = BX_TLB_INDEX_OF(laddr);
bx_address lpf = LPFOf(laddr);
if ((BX_CPU_THIS_PTR TLB.entry[tlbIndex].lpf == BX_TLB_LPF_VALUE(lpf)))
{
// See if the TLB entry privilege level allows us write access
// from this CPL.
Bit32u accessBits = BX_CPU_THIS_PTR TLB.entry[tlbIndex].accessBits;
if (accessBits & (1 << (2 | pl))) {
bx_hostpageaddr_t hostPageAddr;
hostPageAddr = BX_CPU_THIS_PTR TLB.entry[tlbIndex].hostPageAddr;
Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
Bit32u *pageStamp;
pageStamp = & BX_CPU_THIS_PTR iCache.pageWriteStampTable[
BX_CPU_THIS_PTR TLB.entry[tlbIndex].ppf>>12];
#endif
// Current write access has privilege.
if (hostPageAddr
#if BX_SUPPORT_ICACHE
&& (*pageStamp & ICacheWriteStampMask)
#endif
) {
WriteHostQWordToLittleEndian(hostAddr, *data);
#if BX_SUPPORT_ICACHE
(*pageStamp)--;
#endif
return;
}
}
}
}
}
#endif // BX_SupportGuest2HostTLB

access_linear(laddr, 8, pl, BX_WRITE, (void *) data);
return;
}
}
write_virtual_checks(seg, offset, 8);
goto accessOK;
}

void BX_CPP_AttrRegparmN(3)
BX_CPU_C::read_virtual_qword(unsigned s, bx_address offset, Bit64u *data)
{
bx_address laddr;
bx_segment_reg_t *seg;

seg = &BX_CPU_THIS_PTR sregs[s];
if (seg->cache.valid & SegAccessROK) {
if ((IsLongMode() && IsCanonical(offset))
|| (offset <= (seg->cache.u.segment.limit_scaled-7))) {
unsigned pl;
accessOK:
laddr = seg->cache.u.segment.base + offset;
BX_INSTR_MEM_DATA(BX_CPU_ID, laddr, 8, BX_READ);
pl = (CPL==3);

#if BX_SupportGuest2HostTLB
{
Bit32u pageOffset = laddr & 0xfff;
if (pageOffset <= 0xff8) { // Make sure access does not span 2 pages.
Bit32u tlbIndex = BX_TLB_INDEX_OF(laddr);
bx_address lpf = LPFOf(laddr);
if (BX_CPU_THIS_PTR TLB.entry[tlbIndex].lpf == BX_TLB_LPF_VALUE(lpf)) {
// See if the TLB entry privilege level allows us read access
// from this CPL.
Bit32u accessBits = BX_CPU_THIS_PTR TLB.entry[tlbIndex].accessBits;
if (accessBits & (1<<pl)) { // Read this pl OK.
bx_hostpageaddr_t hostPageAddr;
hostPageAddr = BX_CPU_THIS_PTR TLB.entry[tlbIndex].hostPageAddr;
Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
if (hostPageAddr) {
ReadHostQWordFromLittleEndian(hostAddr, *data);
return;
}
}
}
}
}
#endif // BX_SupportGuest2HostTLB

access_linear(laddr, 8, pl, BX_READ, (void *) data);
return;
}
}
read_virtual_checks(seg, offset, 8);
goto accessOK;
}

void
BX_CPU_C::write_RMW_virtual_qword(Bit64u val64)
{
@@ -1026,69 +1005,6 @@ BX_CPU_C::write_RMW_virtual_qword(Bit64u val64)
}
}

void BX_CPP_AttrRegparmN(3)
BX_CPU_C::read_RMW_virtual_qword(unsigned s, bx_address offset, Bit64u *data)
{
bx_address laddr;
bx_segment_reg_t *seg;

seg = &BX_CPU_THIS_PTR sregs[s];
if (seg->cache.valid & SegAccessWOK) {
if ((IsLongMode() && IsCanonical(offset))
|| (offset <= (seg->cache.u.segment.limit_scaled-7))) {
unsigned pl;
accessOK:
laddr = seg->cache.u.segment.base + offset;
BX_INSTR_MEM_DATA(BX_CPU_ID, laddr, 8, BX_RW);
pl = (CPL==3);

#if BX_SupportGuest2HostTLB
{
Bit32u pageOffset = laddr & 0xfff;
if (pageOffset <= 0xff8) { // Make sure access does not span 2 pages.
Bit32u tlbIndex = BX_TLB_INDEX_OF(laddr);
bx_address lpf = LPFOf(laddr);
if ((BX_CPU_THIS_PTR TLB.entry[tlbIndex].lpf == BX_TLB_LPF_VALUE(lpf)))
{
// See if the TLB entry privilege level allows us write access
// from this CPL.
Bit32u accessBits = BX_CPU_THIS_PTR TLB.entry[tlbIndex].accessBits;
if (accessBits & (1 << (2 | pl))) {
bx_hostpageaddr_t hostPageAddr;
hostPageAddr = BX_CPU_THIS_PTR TLB.entry[tlbIndex].hostPageAddr;
Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
Bit32u *pageStamp;
pageStamp = & BX_CPU_THIS_PTR iCache.pageWriteStampTable[
BX_CPU_THIS_PTR TLB.entry[tlbIndex].ppf>>12];
#endif
// Current write access has privilege.
if (hostPageAddr
#if BX_SUPPORT_ICACHE
&& (*pageStamp & ICacheWriteStampMask)
#endif
) {
ReadHostQWordFromLittleEndian(hostAddr, *data);
BX_CPU_THIS_PTR address_xlation.pages = (bx_ptr_equiv_t) hostAddr;
#if BX_SUPPORT_ICACHE
(*pageStamp)--;
#endif
return;
}
}
}
}
}
#endif // BX_SupportGuest2HostTLB

access_linear(laddr, 8, pl, BX_RW, (void *) data);
return;
}
}
write_virtual_checks(seg, offset, 8);
goto accessOK;
}

//
// Some macro defs to make things cleaner for endian-ness issues.
// The following routines access a double qword, ie 16-bytes.
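Throughout the access.cc hunks above, the old inline bookkeeping -- looking up pageStamp in pageWriteStampTable, testing ICacheWriteStampMask, and doing (*pageStamp)-- on every write fast path -- is replaced by a single call to iCache.decWriteStamp(ppf). The body of decWriteStamp() is not part of this diff (only its declaration and call sites are), but judging from the code it replaces it plausibly amounts to something like the following sketch, which assumes the Bochs Bit32u type and the pageWriteStampTable / ICacheWriteStampMask names shown in icache.h below:

// Hedged sketch only -- the real decWriteStamp() body is not shown in this commit.
BX_CPP_INLINE void bxICache_c::decWriteStamp(Bit32u a20Addr)
{
  // One write stamp per 4 KB physical page; a guest write decrements the stamp
  // so any trace cached for this page no longer compares equal and is refetched.
  Bit32u *pageStamp = &pageWriteStampTable[a20Addr >> 12];
  if (*pageStamp & ICacheWriteStampMask)
    (*pageStamp)--;
}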
@@ -1,5 +1,5 @@
/////////////////////////////////////////////////////////////////////////
// $Id: cpu.cc,v 1.93 2004-11-15 19:38:42 sshwarts Exp $
// $Id: cpu.cc,v 1.94 2004-11-18 23:16:35 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
// Copyright (C) 2001 MandrakeSoft S.A.
@@ -253,8 +253,8 @@ printf("CPU_LOOP %d\n", bx_guard.special_unwind_stack);
// Therefore, in either case, we can keep the counter as-is and
// replace the fetch mode bits.
Bit32u fetchModeMask = BX_CPU_THIS_PTR iCache.fetchModeMask;
pageWriteStamp &= 0x1fffffff; // Clear out old fetch mode bits.
pageWriteStamp |= fetchModeMask; // Add in new ones.
pageWriteStamp &= ICacheWriteStampMask; // Clear out old fetch mode bits.
pageWriteStamp |= fetchModeMask; // Add in new ones.
BX_CPU_THIS_PTR iCache.pageWriteStampTable[pAddr>>12] = pageWriteStamp;
cache_entry->pAddr = pAddr;
cache_entry->writeStamp = pageWriteStamp;
@@ -270,7 +270,6 @@ printf("CPU_LOOP %d\n", bx_guard.special_unwind_stack);
execute = i->execute; // fetch as soon as possible for speculation.
if (resolveModRM)
BX_CPU_CALL_METHODR(resolveModRM, (i));

}

// An instruction will have been fetched using either the normal case,
@@ -753,20 +752,17 @@ BX_CPU_C::prefetch(void)
}

#if BX_SUPPORT_ICACHE
Bit32u pageWriteStamp;
Bit32u fetchModeMask;
Bit32u phyPageIndex;

phyPageIndex = pAddr >> 12;
pageWriteStamp = BX_CPU_THIS_PTR iCache.pageWriteStampTable[phyPageIndex];
fetchModeMask = BX_CPU_THIS_PTR iCache.fetchModeMask;
if ( (pageWriteStamp & ICacheFetchModeMask ) != fetchModeMask) {
Bit32u phyPageIndex = pAddr >> 12;
Bit32u pageWriteStamp = BX_CPU_THIS_PTR iCache.pageWriteStampTable[phyPageIndex];
Bit32u fetchModeMask = BX_CPU_THIS_PTR iCache.fetchModeMask;
if ((pageWriteStamp & ICacheFetchModeMask) != fetchModeMask)
{
// The current CPU mode does not match iCache entries for this
// physical page.
pageWriteStamp &= ICacheWriteStampMask; // Clear out old fetch mode bits.
pageWriteStamp |= fetchModeMask; // Add in new ones.
pageWriteStamp |= fetchModeMask; // Add in new ones.
BX_CPU_THIS_PTR iCache.pageWriteStampTable[phyPageIndex] = pageWriteStamp;
}
}
#endif
}
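The reworked prefetch() above keys everything off the top bits of the page write stamp. Per the comments added in icache.h later in this commit, bit 31 encodes the CS operand size, bit 30 long mode, and bit 29 whether the page holds code, while the low 29 bits (masked by ICacheWriteStampMask) count down on guest writes. A small self-contained illustration of that update, with made-up stamp values and a local stand-in for ICacheFetchModeMask (its real definition is not shown in this diff):

// Illustration with assumed values; the masks are redefined locally here.
#include <cstdint>
#include <cstdio>

static const uint32_t kWriteStampMask = 0x1fffffff;        // low 29 bits: write counter
static const uint32_t kFetchModeMask  = ~kWriteStampMask;  // bits 31..29: fetch mode

int main()
{
  uint32_t pageWriteStamp = 0x1ffffff0;  // hypothetical: 16-bit CS, counter already decremented a bit
  uint32_t fetchModeMask  = 0x80000000;  // hypothetical: CPU is now fetching with a 32-bit CS

  if ((pageWriteStamp & kFetchModeMask) != fetchModeMask) {
    pageWriteStamp &= kWriteStampMask;   // clear the stale mode bits, keep the counter
    pageWriteStamp |= fetchModeMask;     // tag the page with the current fetch mode
  }
  printf("updated stamp = 0x%08x\n", pageWriteStamp);  // prints 0x9ffffff0
  return 0;
}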
@@ -1,5 +1,5 @@
/////////////////////////////////////////////////////////////////////////
// $Id: cpu.h,v 1.186 2004-11-14 19:29:34 sshwarts Exp $
// $Id: cpu.h,v 1.187 2004-11-18 23:16:35 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
// Copyright (C) 2001 MandrakeSoft S.A.
@@ -1553,13 +1553,15 @@ public: // for now...
BX_SMF void MOV_ObAL(bxInstruction_c *);
BX_SMF void MOV_OdEAX(bxInstruction_c *);
BX_SMF void MOV_OwAX(bxInstruction_c *);
BX_SMF void TEST_ALIb(bxInstruction_c *);
BX_SMF void TEST_EAXId(bxInstruction_c *);
BX_SMF void TEST_AXIw(bxInstruction_c *);

// repeatable instructions
BX_SMF void MOVSB_XbYb(bxInstruction_c *);
BX_SMF void MOVSW_XvYv(bxInstruction_c *);
BX_SMF void CMPSB_XbYb(bxInstruction_c *);
BX_SMF void CMPSW_XvYv(bxInstruction_c *);
BX_SMF void TEST_ALIb(bxInstruction_c *);
BX_SMF void TEST_EAXId(bxInstruction_c *);
BX_SMF void TEST_AXIw(bxInstruction_c *);
BX_SMF void STOSB_YbAL(bxInstruction_c *);
BX_SMF void STOSW_YveAX(bxInstruction_c *);
BX_SMF void LODSB_ALXb(bxInstruction_c *);
@@ -2652,9 +2654,9 @@ public: // for now...
BX_SMF void Resolve64Mod1or2Base15(bxInstruction_c *) BX_CPP_AttrRegparmN(1);
#endif // #if BX_SUPPORT_X86_64

BX_SMF int REP(bxInstruction_c *);
BX_SMF int REP_ZF(bxInstruction_c *, unsigned rep_prefix);

BX_SMF void REP(void (*)(void));
BX_SMF void REP_ZF(void (*)(void), unsigned rep_prefix);
#if BX_DEBUGGER
BX_SMF void dbg_take_irq(void);
BX_SMF void dbg_force_interrupt(unsigned vector);
@@ -1,6 +1,4 @@
/////////////////////////////////////////////////////////////////////////
// $Id:
/////////////////////////////////////////////////////////////////////////
//
// Copyright (C) 2001 MandrakeSoft S.A.
//
@@ -29,9 +27,10 @@
# define BX_ICACHE_H 1

#define BxICacheEntries (32 * 1024) // Must be a power of 2.
// bit31: 1=CS is 32/64-bit, 0=CS is 16-bit.
// bit30: 1=Long Mode, 0=not Long Mode.
// bit29: 1=iCache page, 0=Data.

// bit31: 1=CS is 32/64-bit, 0=CS is 16-bit.
// bit30: 1=Long Mode, 0=not Long Mode.
// bit29: 1=iCache page, 0=Data.
#define ICacheWriteStampInvalid 0x1fffffff
#define ICacheWriteStampMax 0x1fffffff // Decrements from here.
#define ICacheWriteStampMask 0x1fffffff
@@ -60,33 +59,33 @@ class BOCHSAPI bxICache_c

Bit32u fetchModeMask;

bxICache_c() {
bxICache_c()
{
// Initially clear the iCache;
memset(this, 0, sizeof(*this));
pageWriteStampTable = NULL;
for (unsigned i=0; i<BxICacheEntries; i++) {
entry[i].writeStamp = ICacheWriteStampInvalid;
}
}
}

BX_CPP_INLINE void alloc(unsigned memSizeInBytes) {
BX_CPP_INLINE void alloc(unsigned memSizeInBytes)
{
pageWriteStampTable =
(Bit32u*) malloc(sizeof(Bit32u) * (memSizeInBytes>>12));
for (unsigned i=0; i<(memSizeInBytes>>12); i++) {
pageWriteStampTable[i] = ICacheWriteStampInvalid;
}
}
fetchModeMask = 0; // CS is 16-bit, Long Mode disabled, Data page
}

BX_CPP_INLINE void decWriteStamp(Bit32u a20Addr);

BX_CPP_INLINE void clear(void) {
memset(this, 0, sizeof(*this));
}

BX_CPP_INLINE unsigned hash(Bit32u pAddr) {
BX_CPP_INLINE unsigned hash(Bit32u pAddr)
{
// A pretty dumb hash function for now.
return pAddr & (BxICacheEntries-1);
}
}
};

BX_CPP_INLINE void bxICache_c::decWriteStamp(Bit32u a20Addr)
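The header above also shows how a cached trace stays valid: alloc() gives every 4 KB page of guest RAM one entry in pageWriteStampTable, and the cpu.cc hunk earlier stores the page's current stamp into the iCache entry (cache_entry->writeStamp = pageWriteStamp). A trace is only reused while those two stamps still match, which is what makes decWriteStamp() an invalidation. A minimal sketch of that rule with simplified stand-in types (not the actual Bochs structures):

// Simplified sketch of the validity rule implied by icache.h and cpu.cc above.
struct IcacheEntrySketch {
  unsigned pAddr;       // physical address the trace was compiled from
  unsigned writeStamp;  // page write stamp captured at compile time
};

static bool entryStillValid(const IcacheEntrySketch &e, unsigned pAddr,
                            const unsigned *pageWriteStampTable)
{
  // Any guest write to the page decrements its stamp, so a stale trace
  // stops comparing equal and gets recompiled on the next fetch.
  return e.pAddr == pAddr && e.writeStamp == pageWriteStampTable[pAddr >> 12];
}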
@@ -1,5 +1,5 @@
/////////////////////////////////////////////////////////////////////////
// $Id: init.cc,v 1.59 2004-11-16 19:19:11 sshwarts Exp $
// $Id: init.cc,v 1.60 2004-11-18 23:16:36 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
// Copyright (C) 2001 MandrakeSoft S.A.
@@ -162,7 +162,7 @@ cpu_param_handler (bx_param_c *param, int set, Bit64s val)

void BX_CPU_C::init(BX_MEM_C *addrspace)
{
BX_DEBUG(( "Init $Id: init.cc,v 1.59 2004-11-16 19:19:11 sshwarts Exp $"));
BX_DEBUG(( "Init $Id: init.cc,v 1.60 2004-11-18 23:16:36 sshwarts Exp $"));
// BX_CPU_C constructor
BX_CPU_THIS_PTR set_INTR (0);
#if BX_SUPPORT_APIC
@@ -455,7 +455,6 @@ void BX_CPU_C::init(BX_MEM_C *addrspace)

#if BX_SUPPORT_ICACHE
iCache.alloc(mem->len);
iCache.fetchModeMask = 0; // KPL: fixme!!!
#endif
}

@@ -553,7 +552,7 @@ void BX_CPU_C::reset(unsigned source)
BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.c_ed = 0; /* normal expand up */
BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.r_w = 1; /* writeable */
BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.a = 1; /* accessed */
BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.base = 0xFFFF0000;
BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.base = 0x000F0000;
BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit = 0xFFFF;
BX_CPU_THIS_PTR sregs[BX_SEG_REG_CS].cache.u.segment.limit_scaled = 0xFFFF;
#endif