- Removed --enable-guest2host-tlb configure option. The option is now
always enabled for any Bochs configuration.
Stanislav Shwartsman 2008-12-11 21:19:38 +00:00
parent 69bd21bf1d
commit a2e07ff971
15 changed files with 57 additions and 408 deletions

View File

@ -28,6 +28,8 @@ Detailed change log :
the code.
- Added ability to choose Bochs log file name and Bochs debugger log file
name from Bochs command line (using new -log and -dbglog options)
- Removed --enable-guest2host-tlb configure option. The option is now
always enabled for any Bochs configuration.
- BIOS
- Added S3 (suspend to RAM) ACPI state to BIOS (patch by Gleb Natapov)
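
At the code level, "always enabled" means the BX_SupportGuest2HostTLB symbol no longer exists: configure stops emitting its #define, and every `#if BX_SupportGuest2HostTLB` guard around the fast-path code is deleted, as the hunks below show. A minimal stand-alone illustration of the pattern (the macro name is real; the marker variable is hypothetical):

```cpp
// Before this commit, configure wrote "#define BX_SupportGuest2HostTLB 0"
// (or 1) into config.h, and the fast path compiled conditionally:
#if BX_SupportGuest2HostTLB
static const bool g2h_fast_path_built = true;   // hypothetical marker
#else
static const bool g2h_fast_path_built = false;
#endif
// After the commit, the #define and all such guards are deleted outright,
// so the previously guarded code is part of every build configuration.
```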

View File

@ -1,6 +1,6 @@
#!/usr/bin/perl
#####################################################################
# $Id: batch-build.perl,v 1.11 2007-09-20 17:33:31 sshwarts Exp $
# $Id: batch-build.perl,v 1.12 2008-12-11 21:19:38 sshwarts Exp $
#####################################################################
#
# Batch build tool for multiple configurations
@ -132,8 +132,6 @@ add_configuration ('large-pages',
'--enable-large-pages');
add_configuration ('pae',
'--enable-pae');
add_configuration ('g2h-tlb',
'--enable-guest2host-tlb');
add_configuration ('repeat',
'--enable-repeat-speedups');
add_configuration ('globalpg',
@ -174,6 +172,10 @@ add_configuration ('sse1',
'--enable-sse=1');
add_configuration ('sse2',
'--enable-sse=2');
add_configuration ('sse3',
'--enable-sse=3');
add_configuration ('sse4',
'--enable-sse=4');
add_configuration ('sse2-dbg',
'--enable-sse=2 --enable-debugger');
add_configuration ('sse2-x86-64-wx-d',

View File

@ -737,7 +737,6 @@ typedef
#define BX_SUPPORT_PAE 0
#define BX_SUPPORT_1G_PAGES 0
#define BX_SupportGuest2HostTLB 0
#define BX_SupportRepeatSpeedups 0
#define BX_SupportHostAsms 0

bochs/configure (vendored, 58 changes)
View File

@ -1,5 +1,5 @@
#! /bin/sh
# From configure.in Id: configure.in,v 1.375 2008/10/21 13:45:03 sshwarts Exp .
# From configure.in Id: configure.in,v 1.376 2008/12/11 21:00:01 sshwarts Exp .
# Guess values for system-dependent variables and create Makefiles.
# Generated by GNU Autoconf 2.61.
#
@ -1569,7 +1569,6 @@ Optional Features:
--enable-global-pages support for global pages in PDE/PTE
--enable-1g-pages support for 1G pages in long mode
--enable-mtrr support for MTRRs
--enable-guest2host-tlb support guest to host addr TLB for speed
--enable-repeat-speedups support repeated IO and mem copy speedups
--enable-trace-cache support instruction trace cache
--enable-icache support instruction cache
@ -1612,6 +1611,7 @@ Optional Features:
--enable-iodebug enable I/O interface to debugger
--enable-docbook build the Docbook documentation
--enable-xpm enable the check for XPM support
--enable-guest2host-tlb enables guest to host addr TLB for speed (deprecated)
--enable-external-debugger enables external debugger interface (deprecated)
--enable-magic-breakpoints enables magic breakpoints (deprecated)
--enable-save-restore enables save/restore (deprecated)
@ -34393,29 +34393,6 @@ _ACEOF
fi
{ echo "$as_me:$LINENO: checking for guest to host TLB support" >&5
echo $ECHO_N "checking for guest to host TLB support... $ECHO_C" >&6; }
# Check whether --enable-guest2host-tlb was given.
if test "${enable_guest2host_tlb+set}" = set; then
enableval=$enable_guest2host_tlb; if test "$enableval" = yes; then
{ echo "$as_me:$LINENO: result: yes" >&5
echo "${ECHO_T}yes" >&6; }
speedup_guest2host_tlb=1
else
{ echo "$as_me:$LINENO: result: no" >&5
echo "${ECHO_T}no" >&6; }
speedup_guest2host_tlb=0
fi
else
{ echo "$as_me:$LINENO: result: no" >&5
echo "${ECHO_T}no" >&6; }
speedup_guest2host_tlb=0
fi
{ echo "$as_me:$LINENO: checking for repeated IO and mem copy speedups" >&5
echo $ECHO_N "checking for repeated IO and mem copy speedups... $ECHO_C" >&6; }
# Check whether --enable-repeat-speedups was given.
@ -34788,7 +34765,6 @@ fi
if test "$speedups_all" = 1; then
# Configure requested to force all options enabled.
speedup_guest2host_tlb=1
speedup_repeat=1
speedup_iCache=1
speedup_TraceCache=1
@ -34801,18 +34777,6 @@ echo "$as_me: error: iCache is required to compile with trace cache optimization
{ (exit 1); exit 1; }; }
fi
if test "$speedup_guest2host_tlb" = 1; then
cat >>confdefs.h <<\_ACEOF
#define BX_SupportGuest2HostTLB 1
_ACEOF
else
cat >>confdefs.h <<\_ACEOF
#define BX_SupportGuest2HostTLB 0
_ACEOF
fi
if test "$speedup_repeat" = 1; then
cat >>confdefs.h <<\_ACEOF
#define BX_SupportRepeatSpeedups 1
@ -39338,6 +39302,24 @@ if test "$with_rfb" = yes -a "$cross_configure" = 0; then
fi
{ echo "$as_me:$LINENO: checking for guest to host TLB support (deprecated)" >&5
echo $ECHO_N "checking for guest to host TLB support (deprecated)... $ECHO_C" >&6; }
# Check whether --enable-guest2host-tlb was given.
if test "${enable_guest2host_tlb+set}" = set; then
enableval=$enable_guest2host_tlb; { echo "$as_me:$LINENO: result: $enableval" >&5
echo "${ECHO_T}$enableval" >&6; }
{ { echo "$as_me:$LINENO: error: DEPRECATED - ALWAYS ON" >&5
echo "$as_me: error: DEPRECATED - ALWAYS ON" >&2;}
{ (exit 1); exit 1; }; }
else
{ echo "$as_me:$LINENO: result: no" >&5
echo "${ECHO_T}no" >&6; }
fi
{ echo "$as_me:$LINENO: checking for external debugger interface (deprecated)" >&5
echo $ECHO_N "checking for external debugger interface (deprecated)... $ECHO_C" >&6; }
# Check whether --enable-external-debugger was given.

View File

@ -2,7 +2,7 @@ dnl // Process this file with autoconf to produce a configure script.
AC_PREREQ(2.50)
AC_INIT(bochs.h)
AC_REVISION([[$Id: configure.in,v 1.376 2008-12-11 21:00:01 sshwarts Exp $]])
AC_REVISION([[$Id: configure.in,v 1.377 2008-12-11 21:19:37 sshwarts Exp $]])
AC_CONFIG_HEADER(config.h)
AC_CONFIG_HEADER(ltdlconf.h)
@ -995,22 +995,6 @@ AC_ARG_ENABLE(mtrr,
]
)
AC_MSG_CHECKING(for guest to host TLB support)
AC_ARG_ENABLE(guest2host-tlb,
[ --enable-guest2host-tlb support guest to host addr TLB for speed],
[if test "$enableval" = yes; then
AC_MSG_RESULT(yes)
speedup_guest2host_tlb=1
else
AC_MSG_RESULT(no)
speedup_guest2host_tlb=0
fi],
[
AC_MSG_RESULT(no)
speedup_guest2host_tlb=0
]
)
AC_MSG_CHECKING(for repeated IO and mem copy speedups)
AC_ARG_ENABLE(repeat-speedups,
[ --enable-repeat-speedups support repeated IO and mem copy speedups],
@ -1253,7 +1237,6 @@ AC_ARG_ENABLE(all-optimizations,
if test "$speedups_all" = 1; then
# Configure requested to force all options enabled.
speedup_guest2host_tlb=1
speedup_repeat=1
speedup_iCache=1
speedup_TraceCache=1
@ -1264,12 +1247,6 @@ if test "$speedup_iCache" = 0 -a "$speedup_TraceCache" = 1; then
AC_MSG_ERROR([iCache is required to compile with trace cache optimization])
fi
if test "$speedup_guest2host_tlb" = 1; then
AC_DEFINE(BX_SupportGuest2HostTLB, 1)
else
AC_DEFINE(BX_SupportGuest2HostTLB, 0)
fi
if test "$speedup_repeat" = 1; then
AC_DEFINE(BX_SupportRepeatSpeedups, 1)
else
@ -2943,6 +2920,16 @@ fi
dnl // DEPRECATED configure options - force users to remove them
AC_MSG_CHECKING(for guest to host TLB support (deprecated))
AC_ARG_ENABLE(guest2host-tlb,
[ --enable-guest2host-tlb enables guest to host addr TLB for speed (deprecated)],
[AC_MSG_RESULT($enableval)
AC_MSG_ERROR([DEPRECATED - ALWAYS ON])
],
[
AC_MSG_RESULT(no)
])
AC_MSG_CHECKING(for external debugger interface (deprecated))
AC_ARG_ENABLE(external-debugger,
[ --enable-external-debugger enables external debugger interface (deprecated)],

View File

@ -1,5 +1,5 @@
/////////////////////////////////////////////////////////////////////////
// $Id: access.cc,v 1.120 2008-09-08 20:47:33 sshwarts Exp $
// $Id: access.cc,v 1.121 2008-12-11 21:19:38 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
// Copyright (C) 2001 MandrakeSoft S.A.
@ -270,7 +270,6 @@ BX_CPU_C::system_read_byte(bx_address laddr)
{
Bit8u data;
#if BX_SupportGuest2HostTLB
unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 0);
bx_address lpf = LPFOf(laddr);
bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
@ -284,7 +283,6 @@ BX_CPU_C::system_read_byte(bx_address laddr)
tlbEntry->ppf | pageOffset, 1, 0, BX_READ, (Bit8u*) &data);
return data;
}
#endif
access_read_linear(laddr, 1, 0, BX_READ, (void *) &data);
return data;
@ -295,7 +293,6 @@ BX_CPU_C::system_read_word(bx_address laddr)
{
Bit16u data;
#if BX_SupportGuest2HostTLB
unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 1);
bx_address lpf = LPFOf(laddr);
bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
@ -309,7 +306,6 @@ BX_CPU_C::system_read_word(bx_address laddr)
tlbEntry->ppf | pageOffset, 2, 0, BX_READ, (Bit8u*) &data);
return data;
}
#endif
access_read_linear(laddr, 2, 0, BX_READ, (void *) &data);
return data;
@ -320,7 +316,6 @@ BX_CPU_C::system_read_dword(bx_address laddr)
{
Bit32u data;
#if BX_SupportGuest2HostTLB
unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 3);
bx_address lpf = LPFOf(laddr);
bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
@ -334,7 +329,6 @@ BX_CPU_C::system_read_dword(bx_address laddr)
tlbEntry->ppf | pageOffset, 4, 0, BX_READ, (Bit8u*) &data);
return data;
}
#endif
access_read_linear(laddr, 4, 0, BX_READ, (void *) &data);
return data;
@ -345,7 +339,6 @@ BX_CPU_C::system_read_qword(bx_address laddr)
{
Bit64u data;
#if BX_SupportGuest2HostTLB
unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 7);
bx_address lpf = LPFOf(laddr);
bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
@ -359,13 +352,11 @@ BX_CPU_C::system_read_qword(bx_address laddr)
tlbEntry->ppf | pageOffset, 8, 0, BX_READ, (Bit8u*) &data);
return data;
}
#endif
access_read_linear(laddr, 8, 0, BX_READ, (void *) &data);
return data;
}
#if BX_SupportGuest2HostTLB
Bit8u* BX_CPP_AttrRegparmN(2)
BX_CPU_C::v2h_read_byte(bx_address laddr, bx_bool user)
{
@ -409,4 +400,3 @@ BX_CPU_C::v2h_write_byte(bx_address laddr, bx_bool user)
return 0;
}
#endif // BX_SupportGuest2HostTLB
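
The v2h_read_byte/v2h_write_byte helpers above, now compiled unconditionally, are the heart of the speedup: a direct-mapped TLB whose entries carry a host pointer to the guest page, so a hit turns an emulated memory access into a plain host load. A self-contained toy analogue (all names hypothetical; the real bx_TLB_entry also tracks physical frames and access bits):

```cpp
#include <cstdint>
#include <cstdio>

constexpr unsigned kTlbEntries = 1024;                 // direct-mapped
constexpr uint64_t kPageMask   = ~uint64_t(0xFFF);

struct TlbEntry {
    uint64_t lpf = ~uint64_t(0);    // linear page frame tag (invalid)
    uint8_t *hostPageAddr = nullptr;
};

static uint8_t  guest_ram[4 * 4096];                   // toy guest memory
static TlbEntry tlb[kTlbEntries];

// Miss path: "translate" (identity here), refill the entry, return the page.
static uint8_t *refill(uint64_t laddr)
{
    uint64_t lpf = laddr & kPageMask;
    TlbEntry &e = tlb[(lpf >> 12) % kTlbEntries];
    e.lpf = lpf;
    e.hostPageAddr = &guest_ram[lpf % sizeof(guest_ram)];
    return e.hostPageAddr;
}

static uint8_t read_byte(uint64_t laddr)
{
    uint64_t lpf = laddr & kPageMask;
    TlbEntry &e = tlb[(lpf >> 12) % kTlbEntries];
    uint8_t *page = (e.lpf == lpf && e.hostPageAddr) ? e.hostPageAddr
                                                     : refill(laddr);
    return page[laddr & 0xFFF];                        // direct host access
}

int main()
{
    guest_ram[0x1234] = 0x5A;
    std::printf("0x%02X\n", read_byte(0x1234));        // prints 0x5A
    return 0;
}
```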

View File

@ -1,5 +1,5 @@
/////////////////////////////////////////////////////////////////////////
// $Id: access32.cc,v 1.19 2008-10-06 17:50:06 sshwarts Exp $
// $Id: access32.cc,v 1.20 2008-12-11 21:19:38 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2008 Stanislav Shwartsman
@ -39,7 +39,6 @@ BX_CPU_C::write_virtual_byte_32(unsigned s, Bit32u offset, Bit8u data)
if (offset <= seg->cache.u.segment.limit_scaled) {
accessOK:
laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
#if BX_SupportGuest2HostTLB
unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 0);
Bit32u lpf = LPFOf(laddr);
bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
@ -60,7 +59,6 @@ accessOK:
return;
}
}
#endif
access_write_linear(laddr, 1, CPL, (void *) &data);
return;
}
@ -88,7 +86,6 @@ BX_CPU_C::write_virtual_word_32(unsigned s, Bit32u offset, Bit16u data)
if (offset < seg->cache.u.segment.limit_scaled) {
accessOK:
laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
#if BX_SupportGuest2HostTLB
unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 1);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
Bit32u lpf = AlignedAccessLPFOf(laddr, (1 & BX_CPU_THIS_PTR alignment_check_mask));
@ -113,7 +110,6 @@ accessOK:
return;
}
}
#endif
#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
if (BX_CPU_THIS_PTR alignment_check()) {
@ -151,7 +147,6 @@ BX_CPU_C::write_virtual_dword_32(unsigned s, Bit32u offset, Bit32u data)
if (offset < (seg->cache.u.segment.limit_scaled-2)) {
accessOK:
laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
#if BX_SupportGuest2HostTLB
unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 3);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
Bit32u lpf = AlignedAccessLPFOf(laddr, (3 & BX_CPU_THIS_PTR alignment_check_mask));
@ -176,7 +171,6 @@ accessOK:
return;
}
}
#endif
#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
if (BX_CPU_THIS_PTR alignment_check()) {
@ -214,7 +208,6 @@ BX_CPU_C::write_virtual_qword_32(unsigned s, Bit32u offset, Bit64u data)
if (offset <= (seg->cache.u.segment.limit_scaled-7)) {
accessOK:
laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
#if BX_SupportGuest2HostTLB
unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 7);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
Bit32u lpf = AlignedAccessLPFOf(laddr, (7 & BX_CPU_THIS_PTR alignment_check_mask));
@ -239,7 +232,6 @@ accessOK:
return;
}
}
#endif
#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
if (BX_CPU_THIS_PTR alignment_check()) {
@ -279,7 +271,6 @@ BX_CPU_C::write_virtual_dqword_32(unsigned s, Bit32u offset, const BxPackedXmmRe
if (offset <= (seg->cache.u.segment.limit_scaled-15)) {
accessOK:
laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
#if BX_SupportGuest2HostTLB
unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 15);
Bit32u lpf = LPFOf(laddr);
bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
@ -301,7 +292,6 @@ accessOK:
return;
}
}
#endif
access_write_linear(laddr, 16, CPL, (void *) data);
return;
@ -330,7 +320,6 @@ BX_CPU_C::write_virtual_dqword_aligned_32(unsigned s, Bit32u offset, const BxPac
if (offset <= (seg->cache.u.segment.limit_scaled-15)) {
accessOK:
laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
#if BX_SupportGuest2HostTLB
unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 15);
Bit32u lpf = AlignedAccessLPFOf(laddr, 15);
bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
@ -352,7 +341,6 @@ accessOK:
return;
}
}
#endif
if (laddr & 15) {
BX_ERROR(("write_virtual_dqword_aligned_32(): #GP misaligned access"));
exception(BX_GP_EXCEPTION, 0, 0);
@ -387,7 +375,6 @@ BX_CPU_C::read_virtual_byte_32(unsigned s, Bit32u offset)
if (offset <= seg->cache.u.segment.limit_scaled) {
accessOK:
laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
#if BX_SupportGuest2HostTLB
unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 0);
Bit32u lpf = LPFOf(laddr);
bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
@ -405,7 +392,6 @@ accessOK:
return data;
}
}
#endif
access_read_linear(laddr, 1, CPL, BX_READ, (void *) &data);
return data;
}
@ -434,7 +420,6 @@ BX_CPU_C::read_virtual_word_32(unsigned s, Bit32u offset)
if (offset < seg->cache.u.segment.limit_scaled) {
accessOK:
laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
#if BX_SupportGuest2HostTLB
unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 1);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
Bit32u lpf = AlignedAccessLPFOf(laddr, (1 & BX_CPU_THIS_PTR alignment_check_mask));
@ -456,7 +441,6 @@ accessOK:
return data;
}
}
#endif
#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
if (BX_CPU_THIS_PTR alignment_check()) {
@ -495,7 +479,6 @@ BX_CPU_C::read_virtual_dword_32(unsigned s, Bit32u offset)
if (offset < (seg->cache.u.segment.limit_scaled-2)) {
accessOK:
laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
#if BX_SupportGuest2HostTLB
unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 3);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
Bit32u lpf = AlignedAccessLPFOf(laddr, (3 & BX_CPU_THIS_PTR alignment_check_mask));
@ -517,7 +500,6 @@ accessOK:
return data;
}
}
#endif
#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
if (BX_CPU_THIS_PTR alignment_check()) {
@ -556,7 +538,6 @@ BX_CPU_C::read_virtual_qword_32(unsigned s, Bit32u offset)
if (offset <= (seg->cache.u.segment.limit_scaled-7)) {
accessOK:
laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
#if BX_SupportGuest2HostTLB
unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 7);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
Bit32u lpf = AlignedAccessLPFOf(laddr, (7 & BX_CPU_THIS_PTR alignment_check_mask));
@ -578,7 +559,6 @@ accessOK:
return data;
}
}
#endif
#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
if (BX_CPU_THIS_PTR alignment_check()) {
@ -618,7 +598,6 @@ BX_CPU_C::read_virtual_dqword_32(unsigned s, Bit32u offset, BxPackedXmmRegister
if (offset <= (seg->cache.u.segment.limit_scaled-15)) {
accessOK:
laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
#if BX_SupportGuest2HostTLB
unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 15);
Bit32u lpf = LPFOf(laddr);
bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
@ -637,8 +616,6 @@ accessOK:
return;
}
}
#endif
access_read_linear(laddr, 16, CPL, BX_READ, (void *) data);
return;
}
@ -666,7 +643,6 @@ BX_CPU_C::read_virtual_dqword_aligned_32(unsigned s, Bit32u offset, BxPackedXmmR
if (offset <= (seg->cache.u.segment.limit_scaled-15)) {
accessOK:
laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
#if BX_SupportGuest2HostTLB
unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 15);
Bit32u lpf = AlignedAccessLPFOf(laddr, 15);
bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
@ -685,7 +661,6 @@ accessOK:
return;
}
}
#endif
if (laddr & 15) {
BX_ERROR(("read_virtual_dqword_aligned_32(): #GP misaligned access"));
exception(BX_GP_EXCEPTION, 0, 0);
@ -725,7 +700,6 @@ BX_CPU_C::read_RMW_virtual_byte_32(unsigned s, Bit32u offset)
if (offset <= seg->cache.u.segment.limit_scaled) {
accessOK:
laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
#if BX_SupportGuest2HostTLB
unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 0);
Bit32u lpf = LPFOf(laddr);
bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
@ -747,7 +721,6 @@ accessOK:
return data;
}
}
#endif
access_read_linear(laddr, 1, CPL, BX_RW, (void *) &data);
return data;
}
@ -776,7 +749,6 @@ BX_CPU_C::read_RMW_virtual_word_32(unsigned s, Bit32u offset)
if (offset < seg->cache.u.segment.limit_scaled) {
accessOK:
laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
#if BX_SupportGuest2HostTLB
unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 1);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
Bit32u lpf = AlignedAccessLPFOf(laddr, (1 & BX_CPU_THIS_PTR alignment_check_mask));
@ -802,7 +774,6 @@ accessOK:
return data;
}
}
#endif
#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
if (BX_CPU_THIS_PTR alignment_check()) {
@ -841,7 +812,6 @@ BX_CPU_C::read_RMW_virtual_dword_32(unsigned s, Bit32u offset)
if (offset < (seg->cache.u.segment.limit_scaled-2)) {
accessOK:
laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
#if BX_SupportGuest2HostTLB
unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 3);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
Bit32u lpf = AlignedAccessLPFOf(laddr, (3 & BX_CPU_THIS_PTR alignment_check_mask));
@ -867,7 +837,6 @@ accessOK:
return data;
}
}
#endif
#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
if (BX_CPU_THIS_PTR alignment_check()) {
@ -906,7 +875,6 @@ BX_CPU_C::read_RMW_virtual_qword_32(unsigned s, Bit32u offset)
if (offset <= (seg->cache.u.segment.limit_scaled-7)) {
accessOK:
laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
#if BX_SupportGuest2HostTLB
unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 7);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
Bit32u lpf = AlignedAccessLPFOf(laddr, (7 & BX_CPU_THIS_PTR alignment_check_mask));
@ -932,7 +900,6 @@ accessOK:
return data;
}
}
#endif
#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
if (BX_CPU_THIS_PTR alignment_check()) {
@ -1135,7 +1102,6 @@ void BX_CPU_C::write_new_stack_word_32(bx_segment_reg_t *seg, Bit32u offset, uns
accessOK:
laddr = (Bit32u)(seg->cache.u.segment.base) + offset;
bx_bool user = (curr_pl == 3);
#if BX_SupportGuest2HostTLB
unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 1);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
Bit32u lpf = AlignedAccessLPFOf(laddr, (1 & BX_CPU_THIS_PTR alignment_check_mask));
@ -1160,7 +1126,6 @@ accessOK:
return;
}
}
#endif
#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
if (BX_CPU_THIS_PTR alignment_check() && user) {
@ -1198,7 +1163,6 @@ void BX_CPU_C::write_new_stack_dword_32(bx_segment_reg_t *seg, Bit32u offset, un
accessOK:
laddr = (Bit32u)(seg->cache.u.segment.base) + offset;
bx_bool user = (curr_pl == 3);
#if BX_SupportGuest2HostTLB
unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 3);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
Bit32u lpf = AlignedAccessLPFOf(laddr, (3 & BX_CPU_THIS_PTR alignment_check_mask));
@ -1223,7 +1187,6 @@ accessOK:
return;
}
}
#endif
#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
if (BX_CPU_THIS_PTR alignment_check() && user) {
@ -1261,7 +1224,6 @@ void BX_CPU_C::write_new_stack_qword_32(bx_segment_reg_t *seg, Bit32u offset, un
accessOK:
laddr = (Bit32u)(seg->cache.u.segment.base) + offset;
bx_bool user = (curr_pl == 3);
#if BX_SupportGuest2HostTLB
unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 7);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
Bit32u lpf = AlignedAccessLPFOf(laddr, (7 & BX_CPU_THIS_PTR alignment_check_mask));
@ -1286,7 +1248,6 @@ accessOK:
return;
}
}
#endif
#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
if (BX_CPU_THIS_PTR alignment_check() && user) {
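
Several hunks above replace LPFOf() with AlignedAccessLPFOf(laddr, (size-1) & alignment_check_mask). The trick: while alignment checking is active, the operand's misalignment bits are kept in the value compared against the page-aligned TLB tag, so a misaligned access simply misses the fast path and falls through to the slow path, which raises #AC. A hypothetical equivalent of the helper:

```cpp
#include <cstdint>

constexpr uint64_t kLpfMask = ~uint64_t(0xFFF);   // page-frame bits

static inline uint64_t aligned_access_lpf(uint64_t laddr, uint64_t alignMask)
{
    // alignMask = (opsize-1) & alignment_check_mask: nonzero only while
    // alignment checking applies; when it is zero this is plain LPFOf().
    return laddr & (kLpfMask | alignMask);
}
```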

View File

@ -1,5 +1,5 @@
/////////////////////////////////////////////////////////////////////////
// $Id: access64.cc,v 1.20 2008-09-18 17:37:28 sshwarts Exp $
// $Id: access64.cc,v 1.21 2008-12-11 21:19:38 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2008 Stanislav Shwartsman
@ -36,7 +36,6 @@ BX_CPU_C::write_virtual_byte_64(unsigned s, Bit64u offset, Bit8u data)
BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 1, BX_WRITE);
Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
#if BX_SupportGuest2HostTLB
unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 0);
Bit64u lpf = LPFOf(laddr);
bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
@ -57,7 +56,6 @@ BX_CPU_C::write_virtual_byte_64(unsigned s, Bit64u offset, Bit8u data)
return;
}
}
#endif
if (! IsCanonical(laddr)) {
BX_ERROR(("write_virtual_byte_64(): canonical failure"));
@ -75,7 +73,6 @@ BX_CPU_C::write_virtual_word_64(unsigned s, Bit64u offset, Bit16u data)
BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 2, BX_WRITE);
Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
#if BX_SupportGuest2HostTLB
unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 1);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
Bit64u lpf = AlignedAccessLPFOf(laddr, (1 & BX_CPU_THIS_PTR alignment_check_mask));
@ -100,7 +97,6 @@ BX_CPU_C::write_virtual_word_64(unsigned s, Bit64u offset, Bit16u data)
return;
}
}
#endif
if (! IsCanonical(laddr) || ! IsCanonical(laddr+1)) {
BX_ERROR(("write_virtual_word_64(): canonical failure"));
@ -127,7 +123,6 @@ BX_CPU_C::write_virtual_dword_64(unsigned s, Bit64u offset, Bit32u data)
BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 4, BX_WRITE);
Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
#if BX_SupportGuest2HostTLB
unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 3);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
Bit64u lpf = AlignedAccessLPFOf(laddr, (3 & BX_CPU_THIS_PTR alignment_check_mask));
@ -152,7 +147,6 @@ BX_CPU_C::write_virtual_dword_64(unsigned s, Bit64u offset, Bit32u data)
return;
}
}
#endif
if (! IsCanonical(laddr) || ! IsCanonical(laddr+3)) {
BX_ERROR(("write_virtual_dword_64(): canonical failure"));
@ -179,7 +173,6 @@ BX_CPU_C::write_virtual_qword_64(unsigned s, Bit64u offset, Bit64u data)
BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 8, BX_WRITE);
Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
#if BX_SupportGuest2HostTLB
unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 7);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
Bit64u lpf = AlignedAccessLPFOf(laddr, (7 & BX_CPU_THIS_PTR alignment_check_mask));
@ -204,7 +197,6 @@ BX_CPU_C::write_virtual_qword_64(unsigned s, Bit64u offset, Bit64u data)
return;
}
}
#endif
if (! IsCanonical(laddr) || ! IsCanonical(laddr+7)) {
BX_ERROR(("write_virtual_qword_64(): canonical failure"));
@ -231,7 +223,6 @@ BX_CPU_C::write_virtual_dqword_64(unsigned s, Bit64u offset, const BxPackedXmmRe
BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 16, BX_WRITE);
Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
#if BX_SupportGuest2HostTLB
unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 15);
Bit64u lpf = LPFOf(laddr);
bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
@ -253,7 +244,6 @@ BX_CPU_C::write_virtual_dqword_64(unsigned s, Bit64u offset, const BxPackedXmmRe
return;
}
}
#endif
if (! IsCanonical(laddr) || ! IsCanonical(laddr+15)) {
BX_ERROR(("write_virtual_dqword_64(): canonical failure"));
@ -271,7 +261,6 @@ BX_CPU_C::write_virtual_dqword_aligned_64(unsigned s, Bit64u offset, const BxPac
BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 16, BX_WRITE);
Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
#if BX_SupportGuest2HostTLB
unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 15);
Bit64u lpf = AlignedAccessLPFOf(laddr, 15);
bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
@ -293,7 +282,6 @@ BX_CPU_C::write_virtual_dqword_aligned_64(unsigned s, Bit64u offset, const BxPac
return;
}
}
#endif
if (laddr & 15) {
BX_ERROR(("write_virtual_dqword_aligned_64(): #GP misaligned access"));
@ -316,7 +304,6 @@ BX_CPU_C::read_virtual_byte_64(unsigned s, Bit64u offset)
BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 1, BX_READ);
Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
#if BX_SupportGuest2HostTLB
unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 0);
Bit64u lpf = LPFOf(laddr);
bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
@ -334,7 +321,6 @@ BX_CPU_C::read_virtual_byte_64(unsigned s, Bit64u offset)
return data;
}
}
#endif
if (! IsCanonical(laddr)) {
BX_ERROR(("read_virtual_byte_64(): canonical failure"));
@ -353,7 +339,6 @@ BX_CPU_C::read_virtual_word_64(unsigned s, Bit64u offset)
BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 2, BX_READ);
Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
#if BX_SupportGuest2HostTLB
unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 1);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
Bit64u lpf = AlignedAccessLPFOf(laddr, (1 & BX_CPU_THIS_PTR alignment_check_mask));
@ -375,7 +360,6 @@ BX_CPU_C::read_virtual_word_64(unsigned s, Bit64u offset)
return data;
}
}
#endif
if (! IsCanonical(laddr) || ! IsCanonical(laddr+1)) {
BX_ERROR(("read_virtual_word_64(): canonical failure"));
@ -403,7 +387,6 @@ BX_CPU_C::read_virtual_dword_64(unsigned s, Bit64u offset)
BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 4, BX_READ);
Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
#if BX_SupportGuest2HostTLB
unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 3);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
Bit64u lpf = AlignedAccessLPFOf(laddr, (3 & BX_CPU_THIS_PTR alignment_check_mask));
@ -425,7 +408,6 @@ BX_CPU_C::read_virtual_dword_64(unsigned s, Bit64u offset)
return data;
}
}
#endif
if (! IsCanonical(laddr) || ! IsCanonical(laddr+3)) {
BX_ERROR(("read_virtual_dword_64(): canonical failure"));
@ -453,7 +435,6 @@ BX_CPU_C::read_virtual_qword_64(unsigned s, Bit64u offset)
BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 8, BX_READ);
Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
#if BX_SupportGuest2HostTLB
unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 7);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
Bit64u lpf = AlignedAccessLPFOf(laddr, (7 & BX_CPU_THIS_PTR alignment_check_mask));
@ -475,7 +456,6 @@ BX_CPU_C::read_virtual_qword_64(unsigned s, Bit64u offset)
return data;
}
}
#endif
if (! IsCanonical(laddr) || ! IsCanonical(laddr+7)) {
BX_ERROR(("read_virtual_qword_64(): canonical failure"));
@ -501,7 +481,6 @@ BX_CPU_C::read_virtual_dqword_64(unsigned s, Bit64u offset, BxPackedXmmRegister
BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 16, BX_READ);
Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
#if BX_SupportGuest2HostTLB
unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 15);
Bit64u lpf = LPFOf(laddr);
bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
@ -520,7 +499,6 @@ BX_CPU_C::read_virtual_dqword_64(unsigned s, Bit64u offset, BxPackedXmmRegister
return;
}
}
#endif
if (! IsCanonical(laddr) || ! IsCanonical(laddr+15)) {
BX_ERROR(("read_virtual_dqword_64(): canonical failure"));
@ -537,7 +515,6 @@ BX_CPU_C::read_virtual_dqword_aligned_64(unsigned s, Bit64u offset, BxPackedXmmR
BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 16, BX_READ);
Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
#if BX_SupportGuest2HostTLB
unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 15);
Bit64u lpf = AlignedAccessLPFOf(laddr, 15);
bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
@ -556,7 +533,6 @@ BX_CPU_C::read_virtual_dqword_aligned_64(unsigned s, Bit64u offset, BxPackedXmmR
return;
}
}
#endif
if (laddr & 15) {
BX_ERROR(("read_virtual_dqword_aligned_64(): #GP misaligned access"));
@ -584,7 +560,6 @@ BX_CPU_C::read_RMW_virtual_byte_64(unsigned s, Bit64u offset)
BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 1, BX_RW);
Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
#if BX_SupportGuest2HostTLB
unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 0);
Bit64u lpf = LPFOf(laddr);
bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
@ -606,7 +581,6 @@ BX_CPU_C::read_RMW_virtual_byte_64(unsigned s, Bit64u offset)
return data;
}
}
#endif
if (! IsCanonical(laddr)) {
BX_ERROR(("read_RMW_virtual_byte_64(): canonical failure"));
@ -625,7 +599,6 @@ BX_CPU_C::read_RMW_virtual_word_64(unsigned s, Bit64u offset)
BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 2, BX_RW);
Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
#if BX_SupportGuest2HostTLB
unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 1);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
Bit64u lpf = AlignedAccessLPFOf(laddr, (1 & BX_CPU_THIS_PTR alignment_check_mask));
@ -651,7 +624,6 @@ BX_CPU_C::read_RMW_virtual_word_64(unsigned s, Bit64u offset)
return data;
}
}
#endif
if (! IsCanonical(laddr) || ! IsCanonical(laddr+1)) {
BX_ERROR(("read_RMW_virtual_word_64(): canonical failure"));
@ -679,7 +651,6 @@ BX_CPU_C::read_RMW_virtual_dword_64(unsigned s, Bit64u offset)
BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 4, BX_RW);
Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
#if BX_SupportGuest2HostTLB
unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 3);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
Bit64u lpf = AlignedAccessLPFOf(laddr, (3 & BX_CPU_THIS_PTR alignment_check_mask));
@ -705,7 +676,6 @@ BX_CPU_C::read_RMW_virtual_dword_64(unsigned s, Bit64u offset)
return data;
}
}
#endif
if (! IsCanonical(laddr) || ! IsCanonical(laddr+3)) {
BX_ERROR(("read_RMW_virtual_dword_64(): canonical failure"));
@ -733,7 +703,6 @@ BX_CPU_C::read_RMW_virtual_qword_64(unsigned s, Bit64u offset)
BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 8, BX_RW);
Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
#if BX_SupportGuest2HostTLB
unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 7);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
Bit64u lpf = AlignedAccessLPFOf(laddr, (7 & BX_CPU_THIS_PTR alignment_check_mask));
@ -759,7 +728,6 @@ BX_CPU_C::read_RMW_virtual_qword_64(unsigned s, Bit64u offset)
return data;
}
}
#endif
if (! IsCanonical(laddr) || ! IsCanonical(laddr+7)) {
BX_ERROR(("read_RMW_virtual_qword_64(): canonical failure"));
@ -782,7 +750,6 @@ BX_CPU_C::read_RMW_virtual_qword_64(unsigned s, Bit64u offset)
void BX_CPU_C::write_new_stack_word_64(Bit64u laddr, unsigned curr_pl, Bit16u data)
{
bx_bool user = (curr_pl == 3);
#if BX_SupportGuest2HostTLB
unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 1);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
Bit64u lpf = AlignedAccessLPFOf(laddr, (1 & BX_CPU_THIS_PTR alignment_check_mask));
@ -807,7 +774,6 @@ void BX_CPU_C::write_new_stack_word_64(Bit64u laddr, unsigned curr_pl, Bit16u da
return;
}
}
#endif
if (! IsCanonical(laddr) || ! IsCanonical(laddr+1)) {
BX_ERROR(("write_new_stack_word_64(): canonical failure"));
@ -829,7 +795,6 @@ void BX_CPU_C::write_new_stack_word_64(Bit64u laddr, unsigned curr_pl, Bit16u da
void BX_CPU_C::write_new_stack_dword_64(Bit64u laddr, unsigned curr_pl, Bit32u data)
{
bx_bool user = (curr_pl == 3);
#if BX_SupportGuest2HostTLB
unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 3);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
Bit64u lpf = AlignedAccessLPFOf(laddr, (3 & BX_CPU_THIS_PTR alignment_check_mask));
@ -854,7 +819,6 @@ void BX_CPU_C::write_new_stack_dword_64(Bit64u laddr, unsigned curr_pl, Bit32u d
return;
}
}
#endif
if (! IsCanonical(laddr) || ! IsCanonical(laddr+3)) {
BX_ERROR(("write_new_stack_dword_64(): canonical failure"));
@ -876,7 +840,6 @@ void BX_CPU_C::write_new_stack_dword_64(Bit64u laddr, unsigned curr_pl, Bit32u d
void BX_CPU_C::write_new_stack_qword_64(Bit64u laddr, unsigned curr_pl, Bit64u data)
{
bx_bool user = (curr_pl == 3);
#if BX_SupportGuest2HostTLB
unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 7);
#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
Bit64u lpf = AlignedAccessLPFOf(laddr, (7 & BX_CPU_THIS_PTR alignment_check_mask));
@ -901,7 +864,6 @@ void BX_CPU_C::write_new_stack_qword_64(Bit64u laddr, unsigned curr_pl, Bit64u d
return;
}
}
#endif
if (! IsCanonical(laddr) || ! IsCanonical(laddr+7)) {
BX_ERROR(("write_new_stack_qword_64(): canonical failure"));

View File

@ -1,5 +1,5 @@
/////////////////////////////////////////////////////////////////////////
// $Id: cpu.cc,v 1.256 2008-12-07 19:47:34 sshwarts Exp $
// $Id: cpu.cc,v 1.257 2008-12-11 21:19:38 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
// Copyright (C) 2001 MandrakeSoft S.A.
@ -714,9 +714,7 @@ void BX_CPU_C::prefetch(void)
if ((tlbEntry->lpf == lpf) && !(tlbEntry->accessBits & USER_PL)) {
BX_CPU_THIS_PTR pAddrA20Page = A20ADDR(tlbEntry->ppf);
#if BX_SupportGuest2HostTLB
fetchPtr = (Bit8u*) (tlbEntry->hostPageAddr);
#endif
}
else {
bx_phy_address pAddr;
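
The prefetch() hunk above shows the same host-pointer idea applied to instruction fetch: on a code-page TLB hit, fetchPtr is aimed directly at host memory and the decoder reads bytes without further translation. A compact sketch of that shape (hypothetical types; the real code also handles A20 masking and user/supervisor access bits):

```cpp
#include <cstdint>

struct CodeTlbEntry { uint64_t lpf; uint8_t *hostPageAddr; };

// Returns a direct host pointer for the guest code page on a TLB hit,
// or nullptr to signal that the page walk / refill path must run.
static inline const uint8_t *prefetch_ptr(const CodeTlbEntry &e, uint64_t lpf)
{
    if (e.lpf == lpf && e.hostPageAddr)
        return e.hostPageAddr;
    return nullptr;
}
```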

View File

@ -1,5 +1,5 @@
/////////////////////////////////////////////////////////////////////////
// $Id: cpu.h,v 1.542 2008-12-06 18:52:02 sshwarts Exp $
// $Id: cpu.h,v 1.543 2008-12-11 21:19:38 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
// Copyright (C) 2001 MandrakeSoft S.A.
@ -2990,10 +2990,8 @@ public: // for now...
BX_SMF Bit32u system_read_dword(bx_address laddr) BX_CPP_AttrRegparmN(1);
BX_SMF Bit64u system_read_qword(bx_address laddr) BX_CPP_AttrRegparmN(1);
#if BX_SupportGuest2HostTLB
BX_SMF Bit8u* v2h_read_byte(bx_address laddr, bx_bool user) BX_CPP_AttrRegparmN(2);
BX_SMF Bit8u* v2h_write_byte(bx_address laddr, bx_bool user) BX_CPP_AttrRegparmN(2);
#endif
BX_SMF void branch_near16(Bit16u new_IP) BX_CPP_AttrRegparmN(1);
BX_SMF void branch_near32(Bit32u new_EIP) BX_CPP_AttrRegparmN(1);

View File

@ -1,5 +1,5 @@
/////////////////////////////////////////////////////////////////////////
// $Id: io.cc,v 1.68 2008-12-05 22:34:42 sshwarts Exp $
// $Id: io.cc,v 1.69 2008-12-11 21:19:38 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
// Copyright (C) 2001 MandrakeSoft S.A.
@ -65,29 +65,9 @@ Bit32u BX_CPU_C::FastRepINSW(bxInstruction_c *i, bx_address dstOff, Bit16u port,
// check that the address is word aligned
if (laddrDst & 1) return 0;
#if BX_SupportGuest2HostTLB
hostAddrDst = v2h_write_byte(laddrDst, BX_CPU_THIS_PTR user_pl);
// Check that native host access was not vetoed for that page
if (!hostAddrDst) return 0;
#else
bx_phy_address paddrDst;
if (BX_CPU_THIS_PTR cr0.get_PG()) {
paddrDst = dtranslate_linear(laddrDst, CPL, BX_WRITE);
paddrDst = A20ADDR(paddrDst);
}
else
paddrDst = A20ADDR(laddrDst);
// If we want to write directly into the physical memory array,
// we need the A20 address.
hostAddrDst = BX_MEM(0)->getHostMemAddr(BX_CPU_THIS, paddrDst, BX_WRITE);
// Check that native host access was not vetoed for that page
if (!hostAddrDst) return 0;
#if BX_SUPPORT_ICACHE
pageWriteStampTable.decWriteStamp(paddrDst);
#endif
#endif
// See how many words can fit in the rest of this page.
if (BX_CPU_THIS_PTR get_DF()) {
@ -158,20 +138,7 @@ Bit32u BX_CPU_C::FastRepOUTSW(bxInstruction_c *i, unsigned srcSeg, bx_address sr
// check that the address is word aligned
if (laddrSrc & 1) return 0;
#if BX_SupportGuest2HostTLB
hostAddrSrc = v2h_read_byte(laddrSrc, BX_CPU_THIS_PTR user_pl);
#else
bx_phy_address paddrSrc;
if (BX_CPU_THIS_PTR cr0.get_PG())
paddrSrc = dtranslate_linear(laddrSrc, CPL, BX_READ);
else
paddrSrc = laddrSrc;
// If we want to write directly into the physical memory array,
// we need the A20 address.
hostAddrSrc = BX_MEM(0)->getHostMemAddr(BX_CPU_THIS, A20ADDR(paddrSrc), BX_READ);
#endif
// Check that native host access was not vetoed for that page
if (!hostAddrSrc) return 0;

View File

@ -1,5 +1,5 @@
/////////////////////////////////////////////////////////////////////////
// $Id: load32.cc,v 1.2 2008-09-16 20:57:16 sshwarts Exp $
// $Id: load32.cc,v 1.3 2008-12-11 21:19:38 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2008 Stanislav Shwartsman
@ -48,7 +48,6 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::LOAD_Eb_Resolve16BaseIndex(bxInstruction_c
if (offset <= seg->cache.u.segment.limit_scaled) {
accessOK:
laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
#if BX_SupportGuest2HostTLB
unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 0);
Bit32u lpf = LPFOf(laddr);
bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
@ -63,9 +62,7 @@ accessOK:
BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
tlbEntry->ppf | pageOffset, 1, CPL, BX_READ, (Bit8u*) &TMP8L);
}
else
#endif
{
else {
access_read_linear(laddr, 1, CPL, BX_READ, (void *) &TMP8L);
}
@ -98,7 +95,6 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::LOAD_Eb_Resolve32Base(bxInstruction_c *i)
if (offset <= seg->cache.u.segment.limit_scaled) {
accessOK:
laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
#if BX_SupportGuest2HostTLB
unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 0);
Bit32u lpf = LPFOf(laddr);
bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
@ -113,9 +109,7 @@ accessOK:
BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
tlbEntry->ppf | pageOffset, 1, CPL, BX_READ, (Bit8u*) &TMP8L);
}
else
#endif
{
else {
access_read_linear(laddr, 1, CPL, BX_READ, (void *) &TMP8L);
}
@ -148,7 +142,6 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::LOAD_Eb_Resolve32BaseIndex(bxInstruction_c
if (offset <= seg->cache.u.segment.limit_scaled) {
accessOK:
laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
#if BX_SupportGuest2HostTLB
unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 0);
Bit32u lpf = LPFOf(laddr);
bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
@ -163,9 +156,7 @@ accessOK:
BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
tlbEntry->ppf | pageOffset, 1, CPL, BX_READ, (Bit8u*) &TMP8L);
}
else
#endif
{
else {
access_read_linear(laddr, 1, CPL, BX_READ, (void *) &TMP8L);
}
@ -205,7 +196,6 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::LOAD_Ew_Resolve16BaseIndex(bxInstruction_c
if (offset < seg->cache.u.segment.limit_scaled) {
accessOK:
laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
#if BX_SupportGuest2HostTLB
unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 1);
Bit32u lpf = LPFOf(laddr);
bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
@ -220,9 +210,7 @@ accessOK:
BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
tlbEntry->ppf | pageOffset, 2, CPL, BX_READ, (Bit8u*) &TMP16);
}
else
#endif
{
else {
access_read_linear(laddr, 2, CPL, BX_READ, (void *) &TMP16);
}
@ -255,7 +243,6 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::LOAD_Ew_Resolve32Base(bxInstruction_c *i)
if (offset < seg->cache.u.segment.limit_scaled) {
accessOK:
laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
#if BX_SupportGuest2HostTLB
unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 1);
Bit32u lpf = LPFOf(laddr);
bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
@ -270,9 +257,7 @@ accessOK:
BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
tlbEntry->ppf | pageOffset, 2, CPL, BX_READ, (Bit8u*) &TMP16);
}
else
#endif
{
else {
access_read_linear(laddr, 2, CPL, BX_READ, (void *) &TMP16);
}
@ -305,7 +290,6 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::LOAD_Ew_Resolve32BaseIndex(bxInstruction_c
if (offset < seg->cache.u.segment.limit_scaled) {
accessOK:
laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
#if BX_SupportGuest2HostTLB
unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 1);
Bit32u lpf = LPFOf(laddr);
bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
@ -320,9 +304,7 @@ accessOK:
BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
tlbEntry->ppf | pageOffset, 2, CPL, BX_READ, (Bit8u*) &TMP16);
}
else
#endif
{
else {
access_read_linear(laddr, 2, CPL, BX_READ, (void *) &TMP16);
}
@ -362,7 +344,6 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::LOAD_Ed_Resolve16BaseIndex(bxInstruction_c
if (offset < (seg->cache.u.segment.limit_scaled-2)) {
accessOK:
laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
#if BX_SupportGuest2HostTLB
unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 3);
Bit32u lpf = LPFOf(laddr);
bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
@ -377,9 +358,7 @@ accessOK:
BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
tlbEntry->ppf | pageOffset, 4, CPL, BX_READ, (Bit8u*) &TMP32);
}
else
#endif
{
else {
access_read_linear(laddr, 4, CPL, BX_READ, (void *) &TMP32);
}
@ -412,7 +391,6 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::LOAD_Ed_Resolve32Base(bxInstruction_c *i)
if (offset < (seg->cache.u.segment.limit_scaled-2)) {
accessOK:
laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
#if BX_SupportGuest2HostTLB
unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 3);
Bit32u lpf = LPFOf(laddr);
bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
@ -427,9 +405,7 @@ accessOK:
BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
tlbEntry->ppf | pageOffset, 4, CPL, BX_READ, (Bit8u*) &TMP32);
}
else
#endif
{
else {
access_read_linear(laddr, 4, CPL, BX_READ, (void *) &TMP32);
}
@ -462,7 +438,6 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::LOAD_Ed_Resolve32BaseIndex(bxInstruction_c
if (offset < (seg->cache.u.segment.limit_scaled-2)) {
accessOK:
laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
#if BX_SupportGuest2HostTLB
unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 3);
Bit32u lpf = LPFOf(laddr);
bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
@ -477,9 +452,7 @@ accessOK:
BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
tlbEntry->ppf | pageOffset, 4, CPL, BX_READ, (Bit8u*) &TMP32);
}
else
#endif
{
else {
access_read_linear(laddr, 4, CPL, BX_READ, (void *) &TMP32);
}

View File

@ -1,5 +1,5 @@
/////////////////////////////////////////////////////////////////////////
// $Id: paging.cc,v 1.162 2008-12-11 21:00:01 sshwarts Exp $
// $Id: paging.cc,v 1.163 2008-12-11 21:19:38 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
// Copyright (C) 2001 MandrakeSoft S.A.
@ -1199,7 +1199,6 @@ bx_phy_address BX_CPU_C::translate_linear(bx_address laddr, unsigned curr_pl, un
tlbEntry->accessBits |= TLB_GlobalPage;
#endif
#if BX_SupportGuest2HostTLB
// Attempt to get a host pointer to this physical page. Put that
// pointer in the TLB cache. Note if the request is vetoed, NULL
// will be returned, and it's OK to OR zero in anyways.
@ -1213,7 +1212,6 @@ bx_phy_address BX_CPU_C::translate_linear(bx_address laddr, unsigned curr_pl, un
#endif
tlbEntry->lpf = lpf; // allow direct access with HostPtr
}
#endif
return paddress;
}
@ -1377,7 +1375,6 @@ void BX_CPU_C::access_write_linear(bx_address laddr, unsigned len, unsigned curr
BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr, (bx_phy_address) laddr, len,
curr_pl, BX_WRITE, (Bit8u*) data);
#if BX_SupportGuest2HostTLB
// do not replace to the TLB if there is a breakpoint defined
// in the same page
#if BX_X86_DEBUGGER
@ -1404,7 +1401,6 @@ void BX_CPU_C::access_write_linear(bx_address laddr, unsigned len, unsigned curr
}
}
}
#endif
BX_MEM(0)->writePhysicalPage(BX_CPU_THIS, (bx_phy_address) laddr, len, data);
}
@ -1547,7 +1543,6 @@ void BX_CPU_C::access_read_linear(bx_address laddr, unsigned len, unsigned curr_
BX_CPU_THIS_PTR address_xlation.pages = 1;
BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, (bx_phy_address) laddr, len, xlate_rw);
#if BX_SupportGuest2HostTLB
// do not replace to the TLB if there is a breakpoint defined
// in the same page
#if BX_X86_DEBUGGER
@ -1574,7 +1569,6 @@ void BX_CPU_C::access_read_linear(bx_address laddr, unsigned len, unsigned curr_
}
}
}
#endif
BX_MEM(0)->readPhysicalPage(BX_CPU_THIS, (bx_phy_address) laddr, len, data);
BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr, (bx_phy_address) laddr, len,
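
The paging.cc hunks above make the TLB-fill step unconditional: after a successful page walk, translate_linear() asks the memory object for a host pointer to the physical page and caches it in the entry; a vetoed request (for example, memory-mapped I/O) returns NULL, which the original comment notes is safe to OR into the access bits. A hypothetical sketch of that fill:

```cpp
#include <cstdint>

struct TlbEntry {
    uint64_t lpf;            // linear page frame (the tag)
    uint64_t ppf;            // physical page frame from the walk
    uint8_t *hostPageAddr;   // direct host pointer, or nullptr if vetoed
    uint32_t accessBits;
};

uint8_t *get_host_mem_addr(uint64_t ppf);   // stand-in for getHostMemAddr()

void tlb_fill(TlbEntry &e, uint64_t lpf, uint64_t ppf, uint32_t bits)
{
    e.ppf = ppf;
    e.accessBits = bits;
    e.hostPageAddr = get_host_mem_addr(ppf);   // nullptr: translation-only entry
    e.lpf = lpf;                               // set tag last: entry now valid
}
```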

View File

@ -1,5 +1,5 @@
/////////////////////////////////////////////////////////////////////////
// $Id: string.cc,v 1.68 2008-12-05 22:34:42 sshwarts Exp $
// $Id: string.cc,v 1.69 2008-12-11 21:19:38 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
// Copyright (C) 2001 MandrakeSoft S.A.
@ -65,49 +65,14 @@ Bit32u BX_CPU_C::FastRepMOVSB(bxInstruction_c *i, unsigned srcSeg, bx_address sr
laddrSrc = BX_CPU_THIS_PTR get_laddr(srcSeg, srcOff);
#if BX_SupportGuest2HostTLB
hostAddrSrc = v2h_read_byte(laddrSrc, BX_CPU_THIS_PTR user_pl);
#else
bx_phy_address paddrSrc;
if (BX_CPU_THIS_PTR cr0.get_PG()) {
paddrSrc = dtranslate_linear(laddrSrc, CPL, BX_READ);
}
else {
paddrSrc = laddrSrc;
}
// If we want to write directly into the physical memory array,
// we need the A20 address.
hostAddrSrc = BX_MEM(0)->getHostMemAddr(BX_CPU_THIS, A20ADDR(paddrSrc), BX_READ);
#endif
if (! hostAddrSrc) return 0;
laddrDst = BX_CPU_THIS_PTR get_laddr(dstSeg, dstOff);
#if BX_SupportGuest2HostTLB
hostAddrDst = v2h_write_byte(laddrDst, BX_CPU_THIS_PTR user_pl);
// Check that native host access was not vetoed for that page
if (!hostAddrDst) return 0;
#else
bx_phy_address paddrDst;
if (BX_CPU_THIS_PTR cr0.get_PG()) {
paddrDst = dtranslate_linear(laddrDst, CPL, BX_WRITE);
paddrDst = A20ADDR(paddrDst);
}
else
paddrDst = A20ADDR(laddrDst);
// If we want to write directly into the physical memory array,
// we need the A20 address.
hostAddrDst = BX_MEM(0)->getHostMemAddr(BX_CPU_THIS, paddrDst, BX_WRITE);
// Check that native host access was not vetoed for that page
if (!hostAddrDst) return 0;
#if BX_SUPPORT_ICACHE
pageWriteStampTable.decWriteStamp(paddrDst);
#endif
#endif
// See how many bytes can fit in the rest of this page.
if (BX_CPU_THIS_PTR get_DF()) {
@ -170,49 +135,14 @@ Bit32u BX_CPU_C::FastRepMOVSW(bxInstruction_c *i, unsigned srcSeg, bx_address sr
laddrSrc = BX_CPU_THIS_PTR get_laddr(srcSeg, srcOff);
#if BX_SupportGuest2HostTLB
hostAddrSrc = v2h_read_byte(laddrSrc, BX_CPU_THIS_PTR user_pl);
#else
bx_phy_address paddrSrc;
if (BX_CPU_THIS_PTR cr0.get_PG()) {
paddrSrc = dtranslate_linear(laddrSrc, CPL, BX_READ);
}
else {
paddrSrc = laddrSrc;
}
// If we want to write directly into the physical memory array,
// we need the A20 address.
hostAddrSrc = BX_MEM(0)->getHostMemAddr(BX_CPU_THIS, A20ADDR(paddrSrc), BX_READ);
#endif
if (! hostAddrSrc) return 0;
laddrDst = BX_CPU_THIS_PTR get_laddr(dstSeg, dstOff);
#if BX_SupportGuest2HostTLB
hostAddrDst = v2h_write_byte(laddrDst, BX_CPU_THIS_PTR user_pl);
// Check that native host access was not vetoed for that page
if (!hostAddrDst) return 0;
#else
bx_phy_address paddrDst;
if (BX_CPU_THIS_PTR cr0.get_PG()) {
paddrDst = dtranslate_linear(laddrDst, CPL, BX_WRITE);
paddrDst = A20ADDR(paddrDst);
}
else
paddrDst = A20ADDR(laddrDst);
// If we want to write directly into the physical memory array,
// we need the A20 address.
hostAddrDst = BX_MEM(0)->getHostMemAddr(BX_CPU_THIS, paddrDst, BX_WRITE);
// Check that native host access was not vetoed for that page
if (!hostAddrDst) return 0;
#if BX_SUPPORT_ICACHE
pageWriteStampTable.decWriteStamp(paddrDst);
#endif
#endif
// See how many words can fit in the rest of this page.
if (BX_CPU_THIS_PTR get_DF()) {
@ -278,49 +208,14 @@ Bit32u BX_CPU_C::FastRepMOVSD(bxInstruction_c *i, unsigned srcSeg, bx_address sr
laddrSrc = BX_CPU_THIS_PTR get_laddr(srcSeg, srcOff);
#if BX_SupportGuest2HostTLB
hostAddrSrc = v2h_read_byte(laddrSrc, BX_CPU_THIS_PTR user_pl);
#else
bx_phy_address paddrSrc;
if (BX_CPU_THIS_PTR cr0.get_PG()) {
paddrSrc = dtranslate_linear(laddrSrc, CPL, BX_READ);
}
else {
paddrSrc = laddrSrc;
}
// If we want to write directly into the physical memory array,
// we need the A20 address.
hostAddrSrc = BX_MEM(0)->getHostMemAddr(BX_CPU_THIS, A20ADDR(paddrSrc), BX_READ);
#endif
if (! hostAddrSrc) return 0;
laddrDst = BX_CPU_THIS_PTR get_laddr(dstSeg, dstOff);
#if BX_SupportGuest2HostTLB
hostAddrDst = v2h_write_byte(laddrDst, BX_CPU_THIS_PTR user_pl);
// Check that native host access was not vetoed for that page
if (!hostAddrDst) return 0;
#else
bx_phy_address paddrDst;
if (BX_CPU_THIS_PTR cr0.get_PG()) {
paddrDst = dtranslate_linear(laddrDst, CPL, BX_WRITE);
paddrDst = A20ADDR(paddrDst);
}
else
paddrDst = A20ADDR(laddrDst);
// If we want to write directly into the physical memory array,
// we need the A20 address.
hostAddrDst = BX_MEM(0)->getHostMemAddr(BX_CPU_THIS, paddrDst, BX_WRITE);
// Check that native host access was not vetoed for that page
if (!hostAddrDst) return 0;
#if BX_SUPPORT_ICACHE
pageWriteStampTable.decWriteStamp(paddrDst);
#endif
#endif
// See how many dwords can fit in the rest of this page.
if (BX_CPU_THIS_PTR get_DF()) {
@ -380,29 +275,9 @@ Bit32u BX_CPU_C::FastRepSTOSB(bxInstruction_c *i, unsigned dstSeg, bx_address ds
laddrDst = BX_CPU_THIS_PTR get_laddr(dstSeg, dstOff);
#if BX_SupportGuest2HostTLB
hostAddrDst = v2h_write_byte(laddrDst, BX_CPU_THIS_PTR user_pl);
// Check that native host access was not vetoed for that page
if (!hostAddrDst) return 0;
#else
bx_phy_address paddrDst;
if (BX_CPU_THIS_PTR cr0.get_PG()) {
paddrDst = dtranslate_linear(laddrDst, CPL, BX_WRITE);
paddrDst = A20ADDR(paddrDst);
}
else
paddrDst = A20ADDR(laddrDst);
// If we want to write directly into the physical memory array,
// we need the A20 address.
hostAddrDst = BX_MEM(0)->getHostMemAddr(BX_CPU_THIS, paddrDst, BX_WRITE);
// Check that native host access was not vetoed for that page
if (!hostAddrDst) return 0;
#if BX_SUPPORT_ICACHE
pageWriteStampTable.decWriteStamp(paddrDst);
#endif
#endif
// See how many bytes can fit in the rest of this page.
if (BX_CPU_THIS_PTR get_DF()) {
@ -454,29 +329,9 @@ Bit32u BX_CPU_C::FastRepSTOSW(bxInstruction_c *i, unsigned dstSeg, bx_address ds
laddrDst = BX_CPU_THIS_PTR get_laddr(dstSeg, dstOff);
#if BX_SupportGuest2HostTLB
hostAddrDst = v2h_write_byte(laddrDst, BX_CPU_THIS_PTR user_pl);
// Check that native host access was not vetoed for that page
if (!hostAddrDst) return 0;
#else
bx_phy_address paddrDst;
if (BX_CPU_THIS_PTR cr0.get_PG()) {
paddrDst = dtranslate_linear(laddrDst, CPL, BX_WRITE);
paddrDst = A20ADDR(paddrDst);
}
else
paddrDst = A20ADDR(laddrDst);
// If we want to write directly into the physical memory array,
// we need the A20 address.
hostAddrDst = BX_MEM(0)->getHostMemAddr(BX_CPU_THIS, paddrDst, BX_WRITE);
// Check that native host access was not vetoed for that page
if (!hostAddrDst) return 0;
#if BX_SUPPORT_ICACHE
pageWriteStampTable.decWriteStamp(paddrDst);
#endif
#endif
// See how many words can fit in the rest of this page.
if (BX_CPU_THIS_PTR get_DF()) {
@ -530,29 +385,9 @@ Bit32u BX_CPU_C::FastRepSTOSD(bxInstruction_c *i, unsigned dstSeg, bx_address ds
laddrDst = BX_CPU_THIS_PTR get_laddr(dstSeg, dstOff);
#if BX_SupportGuest2HostTLB
hostAddrDst = v2h_write_byte(laddrDst, BX_CPU_THIS_PTR user_pl);
// Check that native host access was not vetoed for that page
if (!hostAddrDst) return 0;
#else
bx_phy_address paddrDst;
if (BX_CPU_THIS_PTR cr0.get_PG()) {
paddrDst = dtranslate_linear(laddrDst, CPL, BX_WRITE);
paddrDst = A20ADDR(paddrDst);
}
else
paddrDst = A20ADDR(laddrDst);
// If we want to write directly into the physical memory array,
// we need the A20 address.
hostAddrDst = BX_MEM(0)->getHostMemAddr(BX_CPU_THIS, paddrDst, BX_WRITE);
// Check that native host access was not vetoed for that page
if (!hostAddrDst) return 0;
#if BX_SUPPORT_ICACHE
pageWriteStampTable.decWriteStamp(paddrDst);
#endif
#endif
// See how many dwords can fit in the rest of this page.
if (BX_CPU_THIS_PTR get_DF()) {
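
string.cc now takes the v2h_* route unconditionally as well; the deleted #else branches performed the translation by hand with dtranslate_linear(), A20ADDR() and getHostMemAddr(). A self-contained analogue of the batched copy FastRepMOVSB performs once both pages map (forward direction only; all names hypothetical, and the toy lookups are never vetoed):

```cpp
#include <algorithm>
#include <cstdint>
#include <cstring>

static uint8_t guest_ram[2 * 4096];        // toy guest memory, identity-mapped

// Stand-ins for v2h_read_byte / v2h_write_byte in the hunks above; the real
// lookups return NULL when direct access to the page is vetoed.
static uint8_t *v2h_read_ptr(uint64_t l)  { return &guest_ram[l % sizeof(guest_ram)]; }
static uint8_t *v2h_write_ptr(uint64_t l) { return &guest_ram[l % sizeof(guest_ram)]; }

// Copy as many bytes as fit in both pages in one memmove; a return of 0
// tells the caller to fall back to one-iteration-at-a-time emulation.
static uint64_t fast_rep_movsb(uint64_t laddrSrc, uint64_t laddrDst, uint64_t count)
{
    uint8_t *src = v2h_read_ptr(laddrSrc);
    uint8_t *dst = v2h_write_ptr(laddrDst);
    if (!src || !dst) return 0;                         // page vetoed
    uint64_t room = std::min(0x1000 - (laddrSrc & 0xFFF),
                             0x1000 - (laddrDst & 0xFFF));
    uint64_t n = std::min(count, room);
    std::memmove(dst, src, (size_t)n);
    return n;   // caller advances ESI/EDI and decrements ECX by n
}
```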

View File

@ -1,5 +1,5 @@
/////////////////////////////////////////////////////////////////////////
// $Id: main.cc,v 1.385 2008-11-18 20:58:08 sshwarts Exp $
// $Id: main.cc,v 1.386 2008-12-11 21:19:37 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
// Copyright (C) 2002 MandrakeSoft S.A.
@ -1016,7 +1016,6 @@ void bx_init_hardware()
BX_INFO((" XSAVE support: %s",BX_SUPPORT_XSAVE?"yes":"no"));
BX_INFO((" AES support: %s",BX_SUPPORT_AES?"yes":"no"));
BX_INFO(("Optimization configuration"));
BX_INFO((" Guest2HostTLB support: %s",BX_SupportGuest2HostTLB?"yes":"no"));
BX_INFO((" RepeatSpeedups support: %s",BX_SupportRepeatSpeedups?"yes":"no"));
BX_INFO((" Icache support: %s",BX_SUPPORT_ICACHE?"yes":"no"));
BX_INFO((" Trace cache support: %s",BX_SUPPORT_TRACE_CACHE?"yes":"no"));