Canonical check has higher priority than #AC check

Stanislav Shwartsman 2008-02-11 20:52:10 +00:00
parent 5812e375a2
commit 8d7410a852
6 changed files with 176 additions and 152 deletions

access.cc

@@ -1,5 +1,5 @@
/////////////////////////////////////////////////////////////////////////
-// $Id: access.cc,v 1.90 2008-02-02 21:46:49 sshwarts Exp $
+// $Id: access.cc,v 1.91 2008-02-11 20:52:10 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
// Copyright (C) 2001 MandrakeSoft S.A.
@@ -31,6 +31,13 @@
#include "cpu.h"
#define LOG_THIS BX_CPU_THIS_PTR
+// The macro folds the alignment check into the TLB lookup -
+// when the alignment check is enabled, a misaligned access will miss the TLB.
+// BX_CPU_THIS_PTR alignment_check_mask must be initialized to all-ones if
+// the alignment check exception is enabled and to LPF_MASK if not.
+#define AlignedAccessLPFOf(laddr, alignment_mask) \
+  ((laddr) & (LPF_MASK | (alignment_mask))) & (BX_CPU_THIS_PTR alignment_check_mask)
void BX_CPP_AttrRegparmN(3)
BX_CPU_C::write_virtual_checks(bx_segment_reg_t *seg, bx_address offset, unsigned length)
{
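To see what AlignedAccessLPFOf() buys, consider a 2-byte access (alignment_mask = 1). The sketch below is a standalone model with made-up addresses, not code from the commit; it assumes 32-bit linear addresses, so LPF_MASK is 0xfffff000:

    // Standalone model of the macro arithmetic (illustrative, not Bochs code).
    #include <cstdint>
    #include <cstdio>

    static const uint32_t kLpfMask = 0xfffff000; // 32-bit LPF_MASK

    // Mirrors ((laddr) & (LPF_MASK | alignment_mask)) & alignment_check_mask.
    static uint32_t aligned_access_lpf(uint32_t laddr, uint32_t alignment_mask,
                                       uint32_t alignment_check_mask)
    {
      return (laddr & (kLpfMask | alignment_mask)) & alignment_check_mask;
    }

    int main()
    {
      const uint32_t tlb_lpf = 0x00401000; // page frame cached in a TLB entry
      const uint32_t laddr   = 0x00401003; // misaligned 2-byte access on that page

      // #AC disabled: mask == LPF_MASK, the low bits are discarded -> TLB hit.
      printf("hit=%d\n", aligned_access_lpf(laddr, 1, kLpfMask) == tlb_lpf);    // hit=1

      // #AC enabled: mask == all-ones, the misalignment bit survives, so the
      // computed lpf no longer matches the entry -> miss, slow path raises #AC.
      printf("hit=%d\n", aligned_access_lpf(laddr, 1, 0xffffffffu) == tlb_lpf); // hit=0
      return 0;
    }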
@@ -483,17 +490,9 @@ BX_CPU_C::write_virtual_word(unsigned s, bx_address offset, Bit16u data)
accessOK:
laddr = BX_CPU_THIS_PTR get_segment_base(s) + offset;
BX_INSTR_MEM_DATA(BX_CPU_ID, laddr, 2, BX_WRITE);
-#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
-if (BX_CPU_THIS_PTR alignment_check) {
-if (laddr & 1) {
-BX_ERROR(("write_virtual_word(): #AC misaligned access"));
-exception(BX_AC_EXCEPTION, 0, 0);
-}
-}
-#endif
#if BX_SupportGuest2HostTLB
unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 1);
-bx_address lpf = LPFOf(laddr);
+bx_address lpf = AlignedAccessLPFOf(laddr, 1);
bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
if (tlbEntry->lpf == lpf) {
// See if the TLB entry privilege level allows us write access
@@ -516,6 +515,14 @@ accessOK:
BX_ERROR(("write_virtual_word(): canonical failure"));
exception(int_number(seg), 0, 0);
}
#endif
+#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
+if (BX_CPU_THIS_PTR alignment_check()) {
+if (laddr & 1) {
+BX_ERROR(("write_virtual_word(): #AC misaligned access"));
+exception(BX_AC_EXCEPTION, 0, 0);
+}
+}
+#endif
access_linear(laddr, 2, CPL, BX_WRITE, (void *) &data);
return;
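Every accessor changed below follows the same pattern as this one: the #AC test moves from the front of the function to the slow path, after the canonical check, so a non-canonical address now faults with #GP/#SS even when it is also misaligned. A condensed standalone model of the new fault priority (hypothetical helpers, not the Bochs functions):

    // Standalone model of the new fault priority (illustrative, not Bochs code).
    #include <cstdint>
    #include <cstdio>

    enum Fault { FAULT_NONE, FAULT_GP_OR_SS, FAULT_AC };

    // x86-64 canonical test: bits 63..48 must be the sign-extension of bit 47.
    // (Shift trick is the usual idiom; arithmetic right shift assumed.)
    static bool is_canonical(uint64_t laddr)
    {
      return (int64_t)laddr == ((int64_t)(laddr << 16) >> 16);
    }

    // Canonical check first, then the alignment check - the order this commit fixes.
    static Fault classify(uint64_t laddr, uint64_t align_mask, bool ac_enabled)
    {
      if (!is_canonical(laddr)) return FAULT_GP_OR_SS;
      if (ac_enabled && (laddr & align_mask)) return FAULT_AC;
      return FAULT_NONE;
    }

    int main()
    {
      // Non-canonical AND misaligned: previously the #AC test ran first;
      // with this commit the canonical failure wins.
      printf("%d\n", classify(0xdead000000000003ull, 7, true)); // 1 = FAULT_GP_OR_SS
      return 0;
    }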
@@ -539,17 +546,9 @@ BX_CPU_C::write_virtual_dword(unsigned s, bx_address offset, Bit32u data)
accessOK:
laddr = BX_CPU_THIS_PTR get_segment_base(s) + offset;
BX_INSTR_MEM_DATA(BX_CPU_ID, laddr, 4, BX_WRITE);
-#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
-if (BX_CPU_THIS_PTR alignment_check) {
-if (laddr & 3) {
-BX_ERROR(("write_virtual_dword(): #AC misaligned access"));
-exception(BX_AC_EXCEPTION, 0, 0);
-}
-}
-#endif
#if BX_SupportGuest2HostTLB
unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 3);
-bx_address lpf = LPFOf(laddr);
+bx_address lpf = AlignedAccessLPFOf(laddr, 3);
bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
if (tlbEntry->lpf == lpf) {
// See if the TLB entry privilege level allows us write access
@@ -572,6 +571,14 @@ accessOK:
BX_ERROR(("write_virtual_dword(): canonical failure"));
exception(int_number(seg), 0, 0);
}
#endif
+#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
+if (BX_CPU_THIS_PTR alignment_check()) {
+if (laddr & 3) {
+BX_ERROR(("write_virtual_dword(): #AC misaligned access"));
+exception(BX_AC_EXCEPTION, 0, 0);
+}
+}
+#endif
access_linear(laddr, 4, CPL, BX_WRITE, (void *) &data);
return;
@@ -595,17 +602,9 @@ BX_CPU_C::write_virtual_qword(unsigned s, bx_address offset, Bit64u data)
accessOK:
laddr = BX_CPU_THIS_PTR get_segment_base(s) + offset;
BX_INSTR_MEM_DATA(BX_CPU_ID, laddr, 8, BX_WRITE);
-#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
-if (BX_CPU_THIS_PTR alignment_check) {
-if (laddr & 7) {
-BX_ERROR(("write_virtual_qword(): #AC misaligned access"));
-exception(BX_AC_EXCEPTION, 0, 0);
-}
-}
-#endif
#if BX_SupportGuest2HostTLB
unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 7);
-bx_address lpf = LPFOf(laddr);
+bx_address lpf = AlignedAccessLPFOf(laddr, 7);
bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
if (tlbEntry->lpf == lpf) {
// See if the TLB entry privilege level allows us write access
@@ -628,6 +627,14 @@ accessOK:
BX_ERROR(("write_virtual_qword(): canonical failure"));
exception(int_number(seg), 0, 0);
}
#endif
+#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
+if (BX_CPU_THIS_PTR alignment_check()) {
+if (laddr & 7) {
+BX_ERROR(("write_virtual_qword(): #AC misaligned access"));
+exception(BX_AC_EXCEPTION, 0, 0);
+}
+}
+#endif
access_linear(laddr, 8, CPL, BX_WRITE, (void *) &data);
return;
@@ -698,17 +705,9 @@ BX_CPU_C::read_virtual_word(unsigned s, bx_address offset)
accessOK:
laddr = BX_CPU_THIS_PTR get_segment_base(s) + offset;
BX_INSTR_MEM_DATA(BX_CPU_ID, laddr, 2, BX_READ);
-#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
-if (BX_CPU_THIS_PTR alignment_check) {
-if (laddr & 1) {
-BX_ERROR(("read_virtual_word(): #AC misaligned access"));
-exception(BX_AC_EXCEPTION, 0, 0);
-}
-}
-#endif
#if BX_SupportGuest2HostTLB
unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 1);
-bx_address lpf = LPFOf(laddr);
+bx_address lpf = AlignedAccessLPFOf(laddr, 1);
bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
if (tlbEntry->lpf == lpf) {
// See if the TLB entry privilege level allows us read access
@@ -728,6 +727,14 @@ accessOK:
BX_ERROR(("read_virtual_word(): canonical failure"));
exception(int_number(seg), 0, 0);
}
#endif
+#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
+if (BX_CPU_THIS_PTR alignment_check()) {
+if (laddr & 1) {
+BX_ERROR(("read_virtual_word(): #AC misaligned access"));
+exception(BX_AC_EXCEPTION, 0, 0);
+}
+}
+#endif
access_linear(laddr, 2, CPL, BX_READ, (void *) &data);
return data;
@@ -752,17 +759,9 @@ BX_CPU_C::read_virtual_dword(unsigned s, bx_address offset)
accessOK:
laddr = BX_CPU_THIS_PTR get_segment_base(s) + offset;
BX_INSTR_MEM_DATA(BX_CPU_ID, laddr, 4, BX_READ);
-#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
-if (BX_CPU_THIS_PTR alignment_check) {
-if (laddr & 3) {
-BX_ERROR(("read_virtual_dword(): #AC misaligned access"));
-exception(BX_AC_EXCEPTION, 0, 0);
-}
-}
-#endif
#if BX_SupportGuest2HostTLB
unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 3);
-bx_address lpf = LPFOf(laddr);
+bx_address lpf = AlignedAccessLPFOf(laddr, 3);
bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
if (tlbEntry->lpf == lpf) {
// See if the TLB entry privilege level allows us read access
@@ -782,6 +781,14 @@ accessOK:
BX_ERROR(("read_virtual_dword(): canonical failure"));
exception(int_number(seg), 0, 0);
}
#endif
+#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
+if (BX_CPU_THIS_PTR alignment_check()) {
+if (laddr & 3) {
+BX_ERROR(("read_virtual_dword(): #AC misaligned access"));
+exception(BX_AC_EXCEPTION, 0, 0);
+}
+}
+#endif
access_linear(laddr, 4, CPL, BX_READ, (void *) &data);
return data;
@@ -806,17 +813,9 @@ BX_CPU_C::read_virtual_qword(unsigned s, bx_address offset)
accessOK:
laddr = BX_CPU_THIS_PTR get_segment_base(s) + offset;
BX_INSTR_MEM_DATA(BX_CPU_ID, laddr, 8, BX_READ);
-#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
-if (BX_CPU_THIS_PTR alignment_check) {
-if (laddr & 7) {
-BX_ERROR(("read_virtual_qword(): #AC misaligned access"));
-exception(BX_AC_EXCEPTION, 0, 0);
-}
-}
-#endif
#if BX_SupportGuest2HostTLB
unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 7);
-bx_address lpf = LPFOf(laddr);
+bx_address lpf = AlignedAccessLPFOf(laddr, 7);
bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
if (tlbEntry->lpf == lpf) {
// See if the TLB entry privilege level allows us read access
@@ -836,6 +835,14 @@ accessOK:
BX_ERROR(("read_virtual_qword(): canonical failure"));
exception(int_number(seg), 0, 0);
}
#endif
+#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
+if (BX_CPU_THIS_PTR alignment_check()) {
+if (laddr & 7) {
+BX_ERROR(("read_virtual_qword(): #AC misaligned access"));
+exception(BX_AC_EXCEPTION, 0, 0);
+}
+}
+#endif
access_linear(laddr, 8, CPL, BX_READ, (void *) &data);
return data;
@@ -917,17 +924,9 @@ BX_CPU_C::read_RMW_virtual_word(unsigned s, bx_address offset)
accessOK:
laddr = BX_CPU_THIS_PTR get_segment_base(s) + offset;
BX_INSTR_MEM_DATA(BX_CPU_ID, laddr, 2, BX_RW);
-#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
-if (BX_CPU_THIS_PTR alignment_check) {
-if (laddr & 1) {
-BX_ERROR(("read_RMW_virtual_word(): #AC misaligned access"));
-exception(BX_AC_EXCEPTION, 0, 0);
-}
-}
-#endif
#if BX_SupportGuest2HostTLB
unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 1);
-bx_address lpf = LPFOf(laddr);
+bx_address lpf = AlignedAccessLPFOf(laddr, 1);
bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
if (tlbEntry->lpf == lpf) {
// See if the TLB entry privilege level allows us write access
@@ -951,6 +950,14 @@ accessOK:
BX_ERROR(("read_RMW_virtual_word(): canonical failure"));
exception(int_number(seg), 0, 0);
}
#endif
+#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
+if (BX_CPU_THIS_PTR alignment_check()) {
+if (laddr & 1) {
+BX_ERROR(("read_RMW_virtual_word(): #AC misaligned access"));
+exception(BX_AC_EXCEPTION, 0, 0);
+}
+}
+#endif
access_linear(laddr, 2, CPL, BX_RW, (void *) &data);
return data;
@@ -975,17 +982,9 @@ BX_CPU_C::read_RMW_virtual_dword(unsigned s, bx_address offset)
accessOK:
laddr = BX_CPU_THIS_PTR get_segment_base(s) + offset;
BX_INSTR_MEM_DATA(BX_CPU_ID, laddr, 4, BX_RW);
-#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
-if (BX_CPU_THIS_PTR alignment_check) {
-if (laddr & 3) {
-BX_ERROR(("read_RMW_virtual_dword(): #AC misaligned access"));
-exception(BX_AC_EXCEPTION, 0, 0);
-}
-}
-#endif
#if BX_SupportGuest2HostTLB
unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 3);
-bx_address lpf = LPFOf(laddr);
+bx_address lpf = AlignedAccessLPFOf(laddr, 3);
bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
if (tlbEntry->lpf == lpf) {
// See if the TLB entry privilege level allows us write access
@@ -1009,6 +1008,14 @@ accessOK:
BX_ERROR(("read_RMW_virtual_dword(): canonical failure"));
exception(int_number(seg), 0, 0);
}
#endif
+#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
+if (BX_CPU_THIS_PTR alignment_check()) {
+if (laddr & 3) {
+BX_ERROR(("read_RMW_virtual_dword(): #AC misaligned access"));
+exception(BX_AC_EXCEPTION, 0, 0);
+}
+}
+#endif
access_linear(laddr, 4, CPL, BX_RW, (void *) &data);
return data;
@@ -1033,17 +1040,9 @@ BX_CPU_C::read_RMW_virtual_qword(unsigned s, bx_address offset)
accessOK:
laddr = BX_CPU_THIS_PTR get_segment_base(s) + offset;
BX_INSTR_MEM_DATA(BX_CPU_ID, laddr, 8, BX_RW);
-#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
-if (BX_CPU_THIS_PTR alignment_check) {
-if (laddr & 7) {
-BX_ERROR(("read_RMW_virtual_qword(): #AC misaligned access"));
-exception(BX_AC_EXCEPTION, 0, 0);
-}
-}
-#endif
#if BX_SupportGuest2HostTLB
unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 7);
-bx_address lpf = LPFOf(laddr);
+bx_address lpf = AlignedAccessLPFOf(laddr, 7);
bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
if (tlbEntry->lpf == lpf) {
// See if the TLB entry privilege level allows us write access
@@ -1067,6 +1066,14 @@ accessOK:
BX_ERROR(("read_RMW_virtual_qword(): canonical failure"));
exception(int_number(seg), 0, 0);
}
#endif
+#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
+if (BX_CPU_THIS_PTR alignment_check()) {
+if (laddr & 7) {
+BX_ERROR(("read_RMW_virtual_qword(): #AC misaligned access"));
+exception(BX_AC_EXCEPTION, 0, 0);
+}
+}
+#endif
access_linear(laddr, 8, CPL, BX_RW, (void *) &data);
return data;
@@ -1301,17 +1308,9 @@ void BX_CPU_C::write_new_stack_word(bx_segment_reg_t *seg, bx_address offset, un
accessOK:
laddr = seg->cache.u.segment.base + offset;
BX_INSTR_MEM_DATA(BX_CPU_ID, laddr, 2, BX_WRITE);
-#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
-if (BX_CPU_THIS_PTR alignment_check) {
-if (laddr & 1) {
-BX_ERROR(("write_new_stack_word(): #AC misaligned access"));
-exception(BX_AC_EXCEPTION, 0, 0);
-}
-}
-#endif
#if BX_SupportGuest2HostTLB
unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 1);
-bx_address lpf = LPFOf(laddr);
+bx_address lpf = AlignedAccessLPFOf(laddr, 1);
bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
if (tlbEntry->lpf == lpf) {
// See if the TLB entry privilege level allows us write access
@@ -1328,6 +1327,14 @@ accessOK:
return;
}
}
#endif
+#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
+if (BX_CPU_THIS_PTR alignment_check()) {
+if (laddr & 1) {
+BX_ERROR(("write_new_stack_word(): #AC misaligned access"));
+exception(BX_AC_EXCEPTION, 0, 0);
+}
+}
+#endif
access_linear(laddr, 2, curr_pl, BX_WRITE, (void *) &data);
return;
@@ -1352,17 +1359,9 @@ void BX_CPU_C::write_new_stack_dword(bx_segment_reg_t *seg, bx_address offset, u
accessOK:
laddr = seg->cache.u.segment.base + offset;
BX_INSTR_MEM_DATA(BX_CPU_ID, laddr, 4, BX_WRITE);
-#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
-if (BX_CPU_THIS_PTR alignment_check) {
-if (laddr & 3) {
-BX_ERROR(("write_new_stack_dword(): #AC misaligned access"));
-exception(BX_AC_EXCEPTION, 0, 0);
-}
-}
-#endif
#if BX_SupportGuest2HostTLB
unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 3);
-bx_address lpf = LPFOf(laddr);
+bx_address lpf = AlignedAccessLPFOf(laddr, 3);
bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
if (tlbEntry->lpf == lpf) {
// See if the TLB entry privilege level allows us write access
@@ -1379,6 +1378,14 @@ accessOK:
return;
}
}
#endif
+#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
+if (BX_CPU_THIS_PTR alignment_check()) {
+if (laddr & 3) {
+BX_ERROR(("write_new_stack_dword(): #AC misaligned access"));
+exception(BX_AC_EXCEPTION, 0, 0);
+}
+}
+#endif
access_linear(laddr, 4, curr_pl, BX_WRITE, (void *) &data);
return;
@@ -1396,42 +1403,43 @@ accessOK:
#if BX_SUPPORT_X86_64
void BX_CPU_C::write_new_stack_qword(bx_address laddr, unsigned curr_pl, Bit64u data)
{
-if (IsCanonical(laddr)) {
-BX_INSTR_MEM_DATA(BX_CPU_ID, laddr, 8, BX_WRITE);
-#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
-if (BX_CPU_THIS_PTR alignment_check) {
-if (laddr & 7) {
-BX_ERROR(("write_new_stack_qword(): #AC misaligned access"));
-exception(BX_AC_EXCEPTION, 0, 0);
-}
-}
-#endif
+BX_INSTR_MEM_DATA(BX_CPU_ID, laddr, 8, BX_WRITE);
#if BX_SupportGuest2HostTLB
-unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 7);
-bx_address lpf = LPFOf(laddr);
-bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
-if (tlbEntry->lpf == lpf) {
-// See if the TLB entry privilege level allows us write access
-// from this CPL.
-if (tlbEntry->accessBits & (0x10 << CPL)) {
-bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
-Bit32u pageOffset = PAGE_OFFSET(laddr);
-BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 8, BX_WRITE);
-Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
+unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 7);
+bx_address lpf = AlignedAccessLPFOf(laddr, 7);
+bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
+if (tlbEntry->lpf == lpf) {
+// See if the TLB entry privilege level allows us write access
+// from this CPL.
+if (tlbEntry->accessBits & (0x10 << CPL)) {
+bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
+Bit32u pageOffset = PAGE_OFFSET(laddr);
+BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 8, BX_WRITE);
+Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
-pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
+pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
-WriteHostQWordToLittleEndian(hostAddr, data);
-return;
-}
+WriteHostQWordToLittleEndian(hostAddr, data);
+return;
+}
#endif
-access_linear(laddr, 8, curr_pl, BX_WRITE, (void *) &data);
-}
-else {
-BX_ERROR(("write_new_stack_qword(): canonical failure 0x%08x:%08x", GET32H(laddr), GET32L(laddr)));
#endif
+if (! IsCanonical(laddr)) {
+BX_ERROR(("write_new_stack_qword(): canonical failure"));
exception(BX_SS_EXCEPTION, 0, 0);
}
+#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
+if (BX_CPU_THIS_PTR alignment_check()) {
+if (laddr & 7) {
+BX_ERROR(("write_new_stack_qword(): #AC misaligned access"));
+exception(BX_AC_EXCEPTION, 0, 0);
+}
+}
+#endif
+access_linear(laddr, 8, curr_pl, BX_WRITE, (void *) &data);
}
#endif

cpu.h

@@ -1,5 +1,5 @@
/////////////////////////////////////////////////////////////////////////
-// $Id: cpu.h,v 1.421 2008-02-07 20:43:12 sshwarts Exp $
+// $Id: cpu.h,v 1.422 2008-02-11 20:52:10 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
// Copyright (C) 2001 MandrakeSoft S.A.
@@ -425,7 +425,7 @@ BOCHSAPI extern BX_CPU_C bx_cpu;
#define IMPLEMENT_EFLAG_ACCESSOR_AC(bitnum) \
BX_CPP_INLINE void BX_CPU_C::clear_AC () { \
BX_CPU_THIS_PTR eflags &= ~(1<<bitnum); \
-BX_CPU_THIS_PTR alignment_check = 0; \
+BX_CPU_THIS_PTR alignment_check_mask = LPF_MASK; \
} \
BX_CPP_INLINE Bit32u BX_CPU_C::get_AC() { \
return BX_CPU_THIS_PTR eflags & (1 << bitnum); \
@@ -930,7 +930,7 @@ public: // for now...
bx_bool in_smm;
bx_bool nmi_disable;
#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
-bx_bool alignment_check;
+bx_address alignment_check_mask;
#endif
#if BX_DEBUGGER
@@ -953,12 +953,15 @@ public: // for now...
bx_TLB_entry entry[BX_TLB_SIZE] BX_CPP_AlignN(16);
} TLB;
#if BX_SUPPORT_X86_64
-#define LPFOf(laddr) ((laddr) & BX_CONST64(0xfffffffffffff000))
+#define LPF_MASK BX_CONST64(0xfffffffffffff000)
#else
-#define LPFOf(laddr) ((laddr) & 0xfffff000)
+#define LPF_MASK 0xfffff000
#endif
+#define LPFOf(laddr) ((laddr) & LPF_MASK)
#endif // #if BX_USE_TLB
#define PAGE_OFFSET(laddr) ((Bit32u)(laddr) & 0xfff)
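Factoring the constant out as LPF_MASK lets LPFOf(), PAGE_OFFSET() and the new AlignedAccessLPFOf() share one definition. An illustrative page-frame/offset split (standalone sketch, 64-bit build and a made-up address, not Bochs code):

    // Illustrative decomposition using the same constants.
    #include <cinttypes>
    #include <cstdint>
    #include <cstdio>

    #define LPF_MASK UINT64_C(0xfffffffffffff000)
    #define LPFOf(laddr) ((laddr) & LPF_MASK)
    #define PAGE_OFFSET(laddr) ((uint32_t)(laddr) & 0xfff)

    int main()
    {
      uint64_t laddr = UINT64_C(0x00007f8012345abc);
      printf("lpf    = %016" PRIx64 "\n", LPFOf(laddr)); // 00007f8012345000
      printf("offset = %03x\n", PAGE_OFFSET(laddr));     // abc
      return 0;
    }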
@@ -3172,6 +3175,10 @@ public: // for now...
BX_SMF BX_CPP_INLINE bx_bool long_mode(void);
BX_SMF BX_CPP_INLINE unsigned get_cpu_mode(void);
+#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
+BX_SMF BX_CPP_INLINE bx_bool alignment_check(void);
+#endif
#if BX_CPU_LEVEL >= 5
BX_SMF Bit64u get_TSC();
BX_SMF void set_TSC(Bit32u tsc);
@@ -3375,6 +3382,15 @@ BX_CPP_INLINE unsigned BX_CPU_C::get_cpu_mode(void)
return (BX_CPU_THIS_PTR cpu_mode);
}
+#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
+BX_CPP_INLINE bx_bool BX_CPU_C::alignment_check(void)
+{
+return (Bit32u)(BX_CPU_THIS_PTR alignment_check_mask) & 1;
+}
+#endif
BOCHSAPI extern const bx_bool bx_parity_lookup[256];
BX_CPP_INLINE void BX_CPU_C::set_PF_base(Bit8u val)
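The alignment_check() accessor above works because alignment_check_mask only ever holds one of two values: all-ones while #AC is live, and LPF_MASK otherwise. LPF_MASK has its low twelve bits clear, so bit 0 of the mask doubles as the old boolean flag. A quick standalone check (illustrative, 64-bit constants assumed):

    // Why bit 0 of the mask encodes "alignment check enabled" (sketch).
    #include <cassert>
    #include <cstdint>

    int main()
    {
      const uint64_t LPF_MASK = UINT64_C(0xfffffffffffff000); // low 12 bits clear
      const uint64_t ALL_ONES = UINT64_C(0xffffffffffffffff);

      assert((ALL_ONES & 1) == 1); // #AC enabled  -> alignment_check() is true
      assert((LPF_MASK & 1) == 0); // #AC disabled -> alignment_check() is false
      return 0;
    }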

init.cc

@@ -1,5 +1,5 @@
/////////////////////////////////////////////////////////////////////////
-// $Id: init.cc,v 1.152 2008-02-02 21:46:51 sshwarts Exp $
+// $Id: init.cc,v 1.153 2008-02-11 20:52:10 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
// Copyright (C) 2001 MandrakeSoft S.A.
@@ -866,7 +866,7 @@ void BX_CPU_C::reset(unsigned source)
BX_CPU_THIS_PTR in_smm = 0;
BX_CPU_THIS_PTR nmi_disable = 0;
#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
-BX_CPU_THIS_PTR alignment_check = 0;
+BX_CPU_THIS_PTR alignment_check_mask = LPF_MASK;
#endif
BX_CPU_THIS_PTR smbase = 0x30000;

paging.cc

@@ -1,5 +1,5 @@
/////////////////////////////////////////////////////////////////////////
-// $Id: paging.cc,v 1.108 2008-02-02 21:46:53 sshwarts Exp $
+// $Id: paging.cc,v 1.109 2008-02-11 20:52:10 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
// Copyright (C) 2001 MandrakeSoft S.A.
@@ -624,13 +624,10 @@ bx_phy_address BX_CPU_C::translate_linear(bx_address laddr, unsigned curr_pl, un
// note - we assume physical memory < 4gig so for brevity & speed, we'll use
// 32 bit entries although cr3 is expanded to 64 bits.
-bx_phy_address paddress, ppf, poffset;
+bx_phy_address paddress, ppf, poffset = PAGE_OFFSET(laddr);
bx_bool isWrite = (rw >= BX_WRITE); // write or r-m-w
unsigned pl = (curr_pl == 3);
-poffset = laddr & 0x00000fff; // physical offset
#if BX_USE_TLB
InstrTLB_Increment(tlbLookups);
InstrTLB_Stats();
@@ -1044,17 +1041,16 @@ bx_bool BX_CPU_C::dbg_xlate_linear2phy(bx_address laddr, bx_phy_address *phy)
return 1;
}
-bx_address lpf = LPFOf(laddr); // linear page frame
-bx_phy_address poffset = (bx_phy_address)(laddr & 0x00000fff); // physical offset
bx_phy_address paddress;
// see if page is in the TLB first
#if BX_USE_TLB
+bx_address lpf = LPFOf(laddr);
unsigned TLB_index = BX_TLB_INDEX_OF(lpf, 0);
bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[TLB_index];
if (tlbEntry->lpf == lpf) {
-paddress = tlbEntry->ppf | poffset;
+paddress = tlbEntry->ppf | PAGE_OFFSET(laddr);
*phy = paddress;
return 1;
}

proc_ctrl.cc

@@ -1,5 +1,5 @@
/////////////////////////////////////////////////////////////////////////
-// $Id: proc_ctrl.cc,v 1.199 2008-02-07 18:28:50 sshwarts Exp $
+// $Id: proc_ctrl.cc,v 1.200 2008-02-11 20:52:10 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
// Copyright (C) 2001 MandrakeSoft S.A.
@@ -1223,11 +1223,15 @@ void BX_CPU_C::handleCpuModeChange(void)
void BX_CPU_C::handleAlignmentCheck(void)
{
if (CPL == 3 && BX_CPU_THIS_PTR cr0.get_AM() && BX_CPU_THIS_PTR get_AC()) {
-BX_CPU_THIS_PTR alignment_check = 1;
+#if BX_SUPPORT_X86_64
+BX_CPU_THIS_PTR alignment_check_mask = BX_CONST64(0xFFFFFFFFFFFFFFFF);
+#else
+BX_CPU_THIS_PTR alignment_check_mask = 0xFFFFFFFF;
+#endif
BX_INFO(("Enable alignment check (#AC exception)"));
}
else {
-BX_CPU_THIS_PTR alignment_check = 0;
+BX_CPU_THIS_PTR alignment_check_mask = LPF_MASK;
}
}
#endif
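So the mask is recomputed whenever CPL, CR0.AM or EFLAGS.AC changes, and #AC is armed only when all three line up at CPL 3. A condensed model of that decision (hypothetical standalone function, not the Bochs method):

    // Standalone model of the mask update (illustrative, not Bochs code).
    #include <cstdint>

    static const uint64_t LPF_MASK = UINT64_C(0xfffffffffffff000);

    static uint64_t compute_alignment_check_mask(unsigned cpl, bool cr0_am,
                                                 bool eflags_ac)
    {
      // #AC is only delivered at CPL 3 with both CR0.AM and EFLAGS.AC set;
      // any other state degrades the mask to plain LPF_MASK (checks off).
      return (cpl == 3 && cr0_am && eflags_ac) ? ~UINT64_C(0) : LPF_MASK;
    }

    int main()
    {
      // Armed at CPL 3 with AM and AC set; disarmed for a CPL 0 kernel.
      bool ok = compute_alignment_check_mask(3, true, true) == ~UINT64_C(0)
             && compute_alignment_check_mask(0, true, true) == LPF_MASK;
      return ok ? 0 : 1;
    }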
@@ -2035,7 +2039,7 @@ void BX_CPU_C::SYSENTER(bxInstruction_c *i)
#endif
#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
-BX_CPU_THIS_PTR alignment_check = 0; // CPL=0
+BX_CPU_THIS_PTR alignment_check_mask = LPF_MASK; // CPL=0
#endif
parse_selector((BX_CPU_THIS_PTR msr.sysenter_cs_msr + 8) & BX_SELECTOR_RPL_MASK,
@@ -2178,7 +2182,7 @@ void BX_CPU_C::SYSCALL(bxInstruction_c *i)
#endif
#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
-BX_CPU_THIS_PTR alignment_check = 0; // CPL=0
+BX_CPU_THIS_PTR alignment_check_mask = LPF_MASK; // CPL=0
#endif
// set up SS segment, flat, 64-bit DPL=0
@@ -2230,7 +2234,7 @@
#endif
#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
-BX_CPU_THIS_PTR alignment_check = 0; // CPL=0
+BX_CPU_THIS_PTR alignment_check_mask = LPF_MASK; // CPL=0
#endif
// set up SS segment, flat, 32-bit DPL=0

smm.cc

@@ -1,5 +1,5 @@
/////////////////////////////////////////////////////////////////////////
-// $Id: smm.cc,v 1.32 2008-02-02 21:46:53 sshwarts Exp $
+// $Id: smm.cc,v 1.33 2008-02-11 20:52:10 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2006 Stanislav Shwartsman
@@ -171,7 +171,7 @@ void BX_CPU_C::enter_system_management_mode(void)
#endif
#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
-BX_CPU_THIS_PTR alignment_check = 0;
+BX_CPU_THIS_PTR alignment_check_mask = LPF_MASK;
#endif
/* DS (Data Segment) and descriptor cache */