Faster 32-bit emulation with 64-bit mode enabled.

~10% speedup by optimizing 32-bit memory access
Stanislav Shwartsman 2008-06-12 19:14:40 +00:00
parent 607900dd4d
commit 92568f7525
21 changed files with 1290 additions and 1250 deletions
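
The gain comes from splitting the data accessors by address size: protected- and real-mode code now goes through the new Bit32u-offset helpers in cpu/access32.cc, long mode keeps the Bit64u helpers in access64.cc, and generic call sites pick the path with a cpu_mode test. A rough restatement of that dispatch, mirroring the read_virtual_word macro added to cpu.h further down (illustrative copy only, not new code in this commit):

#define read_virtual_word(seg, offset) \
  (BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64) ? \
    read_virtual_word_64(seg, offset) : \
    read_virtual_word_32(seg, offset)

Call sites that already know their address size (io.cc, iret.cc, call_far.cc, protect_ctrl.cc below) call the _32 or _64 variant directly and skip the test.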


@ -56,6 +56,7 @@ OBJS = \
resolve.o \
fetchdecode.o \
access.o \
access32.o \
shift16.o \
logical16.o \
ctrl_xfer32.o \
@ -174,6 +175,14 @@ access.o: access.@CPP_SUFFIX@ ../bochs.h ../config.h ../osdep.h ../bx_debug/debu
crregs.h descriptor.h instr.h lazy_flags.h icache.h apic.h \
../cpu/i387.h ../fpu/softfloat.h ../config.h ../fpu/tag_w.h \
../fpu/status_w.h ../fpu/control_w.h ../cpu/xmm.h stack.h
access32.o: access32.@CPP_SUFFIX@ ../bochs.h ../config.h ../osdep.h ../bx_debug/debug.h \
../config.h ../osdep.h ../bxversion.h ../gui/siminterface.h \
../memory/memory.h ../pc_system.h ../plugin.h ../extplugin.h \
../gui/gui.h ../gui/textconfig.h ../config.h ../gui/keymap.h \
../instrument/stubs/instrument.h cpu.h ../disasm/disasm.h ../config.h \
crregs.h descriptor.h instr.h lazy_flags.h icache.h apic.h \
../cpu/i387.h ../fpu/softfloat.h ../config.h ../fpu/tag_w.h \
../fpu/status_w.h ../fpu/control_w.h ../cpu/xmm.h stack.h
access64.o: access64.@CPP_SUFFIX@ ../bochs.h ../config.h ../osdep.h ../bx_debug/debug.h \
../config.h ../osdep.h ../bxversion.h ../gui/siminterface.h \
../memory/memory.h ../pc_system.h ../plugin.h ../extplugin.h \

File diff suppressed because it is too large.

bochs/cpu/access32.cc (new executable file, 939 lines)

@ -0,0 +1,939 @@
/////////////////////////////////////////////////////////////////////////
// $Id: access32.cc,v 1.1 2008-06-12 19:14:39 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2008 Stanislav Shwartsman
// Written by Stanislav Shwartsman [sshwarts at sourceforge net]
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 2 of the License, or (at your option) any later version.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
//
/////////////////////////////////////////////////////////////////////////
#define NEED_CPU_REG_SHORTCUTS 1
#include "bochs.h"
#include "cpu.h"
#define LOG_THIS BX_CPU_THIS_PTR
// This macro folds the alignment check into the TLB lookup -
// when the alignment check is enabled, a misaligned access will miss the TLB.
// BX_CPU_THIS_PTR alignment_check_mask must be initialized to all-ones if
// the alignment check exception is enabled and to LPF_MASK if not.
#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
#define AlignedAccessLPFOf32(laddr, alignment_mask) \
((laddr) & (LPF_MASK | (alignment_mask))) & (BX_CPU_THIS_PTR alignment_check_mask)
#else
#define AlignedAccessLPFOf32(laddr, alignment_mask) LPFOf(laddr)
#endif
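// Worked example, assuming 4 KByte pages (illustration only): for a word
// access at laddr = 0x00001003 with the alignment check active,
// alignment_check_mask is all-ones, so
//   AlignedAccessLPFOf32(0x00001003, 1) = 0x00001003 & (LPF_MASK | 1) = 0x00001001
// which can never equal tlbEntry->lpf (= LPFOf(laddr) = 0x00001000), so the
// TLB fast path is skipped and the explicit #AC test in the slow path fires.
// With the alignment check inactive the mask is LPF_MASK, the low bits are
// dropped, and the same access compares equal to 0x00001000, i.e. it stays
// on the fast path.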
void BX_CPP_AttrRegparmN(3)
BX_CPU_C::write_virtual_byte_32(unsigned s, Bit32u offset, Bit8u data)
{
Bit32u laddr;
bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];
BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 1, BX_WRITE);
BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);
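// Fast path: a segment flagged SegAccessWOK4G is writable with a scaled
// limit covering the whole 32-bit offset range, so the limit check can be
// skipped.  Otherwise the cached SegAccessWOK limit test below is tried
// first; only if it fails does write_virtual_checks() run, which either
// raises a fault or returns, after which the code jumps back to accessOK.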
if (seg->cache.valid & SegAccessWOK4G) {
accessOK:
laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
#if BX_SupportGuest2HostTLB
unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 0);
Bit32u lpf = LPFOf(laddr);
bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
if (tlbEntry->lpf == lpf) {
// See if the TLB entry privilege level allows us write access
// from this CPL.
if (tlbEntry->accessBits & (0x10 << CPL)) {
bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
Bit32u pageOffset = PAGE_OFFSET(laddr);
BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 1, BX_WRITE);
BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
tlbEntry->ppf | pageOffset, 1, CPL, BX_WRITE, (Bit8u*) &data);
Bit8u *hostAddr = (Bit8u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
*hostAddr = data;
return;
}
}
#endif
access_write_linear(laddr, 1, CPL, (void *) &data);
return;
}
if (seg->cache.valid & SegAccessWOK) {
if (offset <= seg->cache.u.segment.limit_scaled)
goto accessOK;
}
write_virtual_checks(seg, offset, 1);
goto accessOK;
}
void BX_CPP_AttrRegparmN(3)
BX_CPU_C::write_virtual_word_32(unsigned s, Bit32u offset, Bit16u data)
{
Bit32u laddr;
bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];
BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 2, BX_WRITE);
BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);
if (seg->cache.valid & SegAccessWOK4G) {
accessOK:
laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
#if BX_SupportGuest2HostTLB
unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 1);
Bit32u lpf = AlignedAccessLPFOf32(laddr, 1);
bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
if (tlbEntry->lpf == lpf) {
// See if the TLB entry privilege level allows us write access
// from this CPL.
if (tlbEntry->accessBits & (0x10 << CPL)) {
bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
Bit32u pageOffset = PAGE_OFFSET(laddr);
BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 2, BX_WRITE);
BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
tlbEntry->ppf | pageOffset, 2, CPL, BX_WRITE, (Bit8u*) &data);
Bit16u *hostAddr = (Bit16u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
WriteHostWordToLittleEndian(hostAddr, data);
return;
}
}
#endif
#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
if (BX_CPU_THIS_PTR alignment_check()) {
if (laddr & 1) {
BX_ERROR(("write_virtual_word(): #AC misaligned access"));
exception(BX_AC_EXCEPTION, 0, 0);
}
}
#endif
access_write_linear(laddr, 2, CPL, (void *) &data);
return;
}
if (seg->cache.valid & SegAccessWOK) {
if (offset < seg->cache.u.segment.limit_scaled)
goto accessOK;
}
write_virtual_checks(seg, offset, 2);
goto accessOK;
}
void BX_CPP_AttrRegparmN(3)
BX_CPU_C::write_virtual_dword_32(unsigned s, Bit32u offset, Bit32u data)
{
Bit32u laddr;
bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];
BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 4, BX_WRITE);
BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);
if (seg->cache.valid & SegAccessWOK4G) {
accessOK:
laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
#if BX_SupportGuest2HostTLB
unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 3);
Bit32u lpf = AlignedAccessLPFOf32(laddr, 3);
bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
if (tlbEntry->lpf == lpf) {
// See if the TLB entry privilege level allows us write access
// from this CPL.
if (tlbEntry->accessBits & (0x10 << CPL)) {
bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
Bit32u pageOffset = PAGE_OFFSET(laddr);
BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 4, BX_WRITE);
BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
tlbEntry->ppf | pageOffset, 4, CPL, BX_WRITE, (Bit8u*) &data);
Bit32u *hostAddr = (Bit32u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
WriteHostDWordToLittleEndian(hostAddr, data);
return;
}
}
#endif
#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
if (BX_CPU_THIS_PTR alignment_check()) {
if (laddr & 3) {
BX_ERROR(("write_virtual_dword(): #AC misaligned access"));
exception(BX_AC_EXCEPTION, 0, 0);
}
}
#endif
access_write_linear(laddr, 4, CPL, (void *) &data);
return;
}
if (seg->cache.valid & SegAccessWOK) {
if (offset < (seg->cache.u.segment.limit_scaled-2))
goto accessOK;
}
write_virtual_checks(seg, offset, 4);
goto accessOK;
}
void BX_CPP_AttrRegparmN(3)
BX_CPU_C::write_virtual_qword_32(unsigned s, Bit32u offset, Bit64u data)
{
Bit32u laddr;
bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];
BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 8, BX_WRITE);
BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);
if (seg->cache.valid & SegAccessWOK4G) {
accessOK:
laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
#if BX_SupportGuest2HostTLB
unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 7);
Bit32u lpf = AlignedAccessLPFOf32(laddr, 7);
bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
if (tlbEntry->lpf == lpf) {
// See if the TLB entry privilege level allows us write access
// from this CPL.
if (tlbEntry->accessBits & (0x10 << CPL)) {
bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
Bit32u pageOffset = PAGE_OFFSET(laddr);
BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 8, BX_WRITE);
BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
tlbEntry->ppf | pageOffset, 8, CPL, BX_WRITE, (Bit8u*) &data);
Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
WriteHostQWordToLittleEndian(hostAddr, data);
return;
}
}
#endif
#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
if (BX_CPU_THIS_PTR alignment_check()) {
if (laddr & 7) {
BX_ERROR(("write_virtual_qword(): #AC misaligned access"));
exception(BX_AC_EXCEPTION, 0, 0);
}
}
#endif
access_write_linear(laddr, 8, CPL, (void *) &data);
return;
}
if (seg->cache.valid & SegAccessWOK) {
if (offset <= (seg->cache.u.segment.limit_scaled-7))
goto accessOK;
}
write_virtual_checks(seg, offset, 8);
goto accessOK;
}
Bit8u BX_CPP_AttrRegparmN(2)
BX_CPU_C::read_virtual_byte_32(unsigned s, Bit32u offset)
{
Bit32u laddr;
bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];
Bit8u data;
BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 1, BX_READ);
BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);
if (seg->cache.valid & SegAccessROK4G) {
accessOK:
laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
#if BX_SupportGuest2HostTLB
unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 0);
Bit32u lpf = LPFOf(laddr);
bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
if (tlbEntry->lpf == lpf) {
// See if the TLB entry privilege level allows us read access
// from this CPL.
if (tlbEntry->accessBits & (1<<CPL)) { // Read this pl OK.
bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
Bit32u pageOffset = PAGE_OFFSET(laddr);
BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 1, BX_READ);
Bit8u *hostAddr = (Bit8u*) (hostPageAddr | pageOffset);
data = *hostAddr;
BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
tlbEntry->ppf | pageOffset, 1, CPL, BX_READ, (Bit8u*) &data);
return data;
}
}
#endif
access_read_linear(laddr, 1, CPL, BX_READ, (void *) &data);
return data;
}
if (seg->cache.valid & SegAccessROK) {
if (offset <= seg->cache.u.segment.limit_scaled)
goto accessOK;
}
read_virtual_checks(seg, offset, 1);
goto accessOK;
}
Bit16u BX_CPP_AttrRegparmN(2)
BX_CPU_C::read_virtual_word_32(unsigned s, Bit32u offset)
{
Bit32u laddr;
bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];
Bit16u data;
BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 2, BX_READ);
BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);
if (seg->cache.valid & SegAccessROK4G) {
accessOK:
laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
#if BX_SupportGuest2HostTLB
unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 1);
Bit32u lpf = AlignedAccessLPFOf32(laddr, 1);
bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
if (tlbEntry->lpf == lpf) {
// See if the TLB entry privilege level allows us read access
// from this CPL.
if (tlbEntry->accessBits & (1<<CPL)) { // Read this pl OK.
bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
Bit32u pageOffset = PAGE_OFFSET(laddr);
BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 2, BX_READ);
Bit16u *hostAddr = (Bit16u*) (hostPageAddr | pageOffset);
ReadHostWordFromLittleEndian(hostAddr, data);
BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
tlbEntry->ppf | pageOffset, 2, CPL, BX_READ, (Bit8u*) &data);
return data;
}
}
#endif
#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
if (BX_CPU_THIS_PTR alignment_check()) {
if (laddr & 1) {
BX_ERROR(("read_virtual_word(): #AC misaligned access"));
exception(BX_AC_EXCEPTION, 0, 0);
}
}
#endif
access_read_linear(laddr, 2, CPL, BX_READ, (void *) &data);
return data;
}
if (seg->cache.valid & SegAccessROK) {
if (offset < seg->cache.u.segment.limit_scaled)
goto accessOK;
}
read_virtual_checks(seg, offset, 2);
goto accessOK;
}
Bit32u BX_CPP_AttrRegparmN(2)
BX_CPU_C::read_virtual_dword_32(unsigned s, Bit32u offset)
{
Bit32u laddr;
bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];
Bit32u data;
BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 4, BX_READ);
BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);
if (seg->cache.valid & SegAccessROK4G) {
accessOK:
laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
#if BX_SupportGuest2HostTLB
unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 3);
Bit32u lpf = AlignedAccessLPFOf32(laddr, 3);
bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
if (tlbEntry->lpf == lpf) {
// See if the TLB entry privilege level allows us read access
// from this CPL.
if (tlbEntry->accessBits & (1<<CPL)) { // Read this pl OK.
bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
Bit32u pageOffset = PAGE_OFFSET(laddr);
BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 4, BX_READ);
Bit32u *hostAddr = (Bit32u*) (hostPageAddr | pageOffset);
ReadHostDWordFromLittleEndian(hostAddr, data);
BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
tlbEntry->ppf | pageOffset, 4, CPL, BX_READ, (Bit8u*) &data);
return data;
}
}
#endif
#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
if (BX_CPU_THIS_PTR alignment_check()) {
if (laddr & 3) {
BX_ERROR(("read_virtual_dword(): #AC misaligned access"));
exception(BX_AC_EXCEPTION, 0, 0);
}
}
#endif
access_read_linear(laddr, 4, CPL, BX_READ, (void *) &data);
return data;
}
if (seg->cache.valid & SegAccessROK) {
if (offset < (seg->cache.u.segment.limit_scaled-2))
goto accessOK;
}
read_virtual_checks(seg, offset, 4);
goto accessOK;
}
Bit64u BX_CPP_AttrRegparmN(2)
BX_CPU_C::read_virtual_qword_32(unsigned s, Bit32u offset)
{
Bit32u laddr;
bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];
Bit64u data;
BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 8, BX_READ);
BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);
if (seg->cache.valid & SegAccessROK4G) {
accessOK:
laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
#if BX_SupportGuest2HostTLB
unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 7);
Bit32u lpf = AlignedAccessLPFOf32(laddr, 7);
bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
if (tlbEntry->lpf == lpf) {
// See if the TLB entry privilege level allows us read access
// from this CPL.
if (tlbEntry->accessBits & (1<<CPL)) { // Read this pl OK.
bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
Bit32u pageOffset = PAGE_OFFSET(laddr);
BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 8, BX_READ);
Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
ReadHostQWordFromLittleEndian(hostAddr, data);
BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
tlbEntry->ppf | pageOffset, 8, CPL, BX_READ, (Bit8u*) &data);
return data;
}
}
#endif
#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
if (BX_CPU_THIS_PTR alignment_check()) {
if (laddr & 7) {
BX_ERROR(("read_virtual_qword(): #AC misaligned access"));
exception(BX_AC_EXCEPTION, 0, 0);
}
}
#endif
access_read_linear(laddr, 8, CPL, BX_READ, (void *) &data);
return data;
}
if (seg->cache.valid & SegAccessROK) {
if (offset <= (seg->cache.u.segment.limit_scaled-7))
goto accessOK;
}
read_virtual_checks(seg, offset, 8);
goto accessOK;
}
//////////////////////////////////////////////////////////////
// special Read-Modify-Write operations //
// address translation info is kept across read/write calls //
//////////////////////////////////////////////////////////////
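// Convention used by the routines below and by the write_RMW_virtual_*
// commit functions: the read_RMW_* helpers record the translation in
// address_xlation so the matching write can skip a second translation.
// pages > 2 means a host pointer was stored for direct access, pages == 1
// means a single translated physical page (paddress1), and pages == 2 means
// the access was split across two pages (paddress1/len1 and paddress2/len2).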
Bit8u BX_CPP_AttrRegparmN(2)
BX_CPU_C::read_RMW_virtual_byte_32(unsigned s, Bit32u offset)
{
Bit32u laddr;
bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];
Bit8u data;
BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 1, BX_RW);
BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);
if (seg->cache.valid & SegAccessWOK4G) {
accessOK:
laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
#if BX_SupportGuest2HostTLB
unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 0);
Bit32u lpf = LPFOf(laddr);
bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
if (tlbEntry->lpf == lpf) {
// See if the TLB entry privilege level allows us write access
// from this CPL.
if (tlbEntry->accessBits & (0x10 << CPL)) {
bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
Bit32u pageOffset = PAGE_OFFSET(laddr);
BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 1, BX_RW);
Bit8u *hostAddr = (Bit8u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
data = *hostAddr;
BX_CPU_THIS_PTR address_xlation.pages = (bx_ptr_equiv_t) hostAddr;
BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
tlbEntry->ppf | pageOffset, 1, CPL, BX_READ, (Bit8u*) &data);
return data;
}
}
#endif
access_read_linear(laddr, 1, CPL, BX_RW, (void *) &data);
return data;
}
if (seg->cache.valid & SegAccessWOK) {
if (offset <= seg->cache.u.segment.limit_scaled)
goto accessOK;
}
write_virtual_checks(seg, offset, 1);
goto accessOK;
}
Bit16u BX_CPP_AttrRegparmN(2)
BX_CPU_C::read_RMW_virtual_word_32(unsigned s, Bit32u offset)
{
Bit32u laddr;
bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];
Bit16u data;
BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 2, BX_RW);
BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);
if (seg->cache.valid & SegAccessWOK4G) {
accessOK:
laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
#if BX_SupportGuest2HostTLB
unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 1);
Bit32u lpf = AlignedAccessLPFOf32(laddr, 1);
bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
if (tlbEntry->lpf == lpf) {
// See if the TLB entry privilege level allows us write access
// from this CPL.
if (tlbEntry->accessBits & (0x10 << CPL)) {
bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
Bit32u pageOffset = PAGE_OFFSET(laddr);
BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 2, BX_RW);
Bit16u *hostAddr = (Bit16u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
ReadHostWordFromLittleEndian(hostAddr, data);
BX_CPU_THIS_PTR address_xlation.pages = (bx_ptr_equiv_t) hostAddr;
BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
tlbEntry->ppf | pageOffset, 2, CPL, BX_READ, (Bit8u*) &data);
return data;
}
}
#endif
#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
if (BX_CPU_THIS_PTR alignment_check()) {
if (laddr & 1) {
BX_ERROR(("read_RMW_virtual_word(): #AC misaligned access"));
exception(BX_AC_EXCEPTION, 0, 0);
}
}
#endif
access_read_linear(laddr, 2, CPL, BX_RW, (void *) &data);
return data;
}
if (seg->cache.valid & SegAccessWOK) {
if (offset < seg->cache.u.segment.limit_scaled)
goto accessOK;
}
write_virtual_checks(seg, offset, 2);
goto accessOK;
}
Bit32u BX_CPP_AttrRegparmN(2)
BX_CPU_C::read_RMW_virtual_dword_32(unsigned s, Bit32u offset)
{
Bit32u laddr;
bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];
Bit32u data;
BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 4, BX_RW);
BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);
if (seg->cache.valid & SegAccessWOK4G) {
accessOK:
laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
#if BX_SupportGuest2HostTLB
unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 3);
Bit32u lpf = AlignedAccessLPFOf32(laddr, 3);
bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
if (tlbEntry->lpf == lpf) {
// See if the TLB entry privilege level allows us write access
// from this CPL.
if (tlbEntry->accessBits & (0x10 << CPL)) {
bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
Bit32u pageOffset = PAGE_OFFSET(laddr);
BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 4, BX_RW);
Bit32u *hostAddr = (Bit32u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
ReadHostDWordFromLittleEndian(hostAddr, data);
BX_CPU_THIS_PTR address_xlation.pages = (bx_ptr_equiv_t) hostAddr;
BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
tlbEntry->ppf | pageOffset, 4, CPL, BX_READ, (Bit8u*) &data);
return data;
}
}
#endif
#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
if (BX_CPU_THIS_PTR alignment_check()) {
if (laddr & 3) {
BX_ERROR(("read_RMW_virtual_dword(): #AC misaligned access"));
exception(BX_AC_EXCEPTION, 0, 0);
}
}
#endif
access_read_linear(laddr, 4, CPL, BX_RW, (void *) &data);
return data;
}
if (seg->cache.valid & SegAccessWOK) {
if (offset < (seg->cache.u.segment.limit_scaled-2))
goto accessOK;
}
write_virtual_checks(seg, offset, 4);
goto accessOK;
}
Bit64u BX_CPP_AttrRegparmN(2)
BX_CPU_C::read_RMW_virtual_qword_32(unsigned s, Bit32u offset)
{
Bit32u laddr;
bx_segment_reg_t *seg = &BX_CPU_THIS_PTR sregs[s];
Bit64u data;
BX_INSTR_MEM_DATA_ACCESS(BX_CPU_ID, s, offset, 8, BX_RW);
BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);
if (seg->cache.valid & SegAccessWOK4G) {
accessOK:
laddr = BX_CPU_THIS_PTR get_laddr32(s, offset);
#if BX_SupportGuest2HostTLB
unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 7);
Bit32u lpf = AlignedAccessLPFOf32(laddr, 7);
bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
if (tlbEntry->lpf == lpf) {
// See if the TLB entry privilege level allows us write access
// from this CPL.
if (tlbEntry->accessBits & (0x10 << CPL)) {
bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
Bit32u pageOffset = PAGE_OFFSET(laddr);
BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 8, BX_RW);
Bit64u *hostAddr = (Bit64u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
ReadHostQWordFromLittleEndian(hostAddr, data);
BX_CPU_THIS_PTR address_xlation.pages = (bx_ptr_equiv_t) hostAddr;
BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
tlbEntry->ppf | pageOffset, 8, CPL, BX_READ, (Bit8u*) &data);
return data;
}
}
#endif
#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
if (BX_CPU_THIS_PTR alignment_check()) {
if (laddr & 7) {
BX_ERROR(("read_RMW_virtual_qword(): #AC misaligned access"));
exception(BX_AC_EXCEPTION, 0, 0);
}
}
#endif
access_read_linear(laddr, 8, CPL, BX_RW, (void *) &data);
return data;
}
if (seg->cache.valid & SegAccessWOK) {
if (offset <= (seg->cache.u.segment.limit_scaled-7))
goto accessOK;
}
write_virtual_checks(seg, offset, 8);
goto accessOK;
}
void BX_CPP_AttrRegparmN(1)
BX_CPU_C::write_RMW_virtual_byte(Bit8u val8)
{
BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID,
BX_CPU_THIS_PTR address_xlation.paddress1, 1, BX_WRITE, (Bit8u*) &val8);
if (BX_CPU_THIS_PTR address_xlation.pages > 2) {
// Pages > 2 means it stores a host address for direct access.
Bit8u *hostAddr = (Bit8u *) BX_CPU_THIS_PTR address_xlation.pages;
*hostAddr = val8;
}
else {
// address_xlation.pages must be 1
BX_MEM(0)->writePhysicalPage(BX_CPU_THIS,
BX_CPU_THIS_PTR address_xlation.paddress1, 1, &val8);
}
}
void BX_CPP_AttrRegparmN(1)
BX_CPU_C::write_RMW_virtual_word(Bit16u val16)
{
if (BX_CPU_THIS_PTR address_xlation.pages > 2) {
// Pages > 2 means it stores a host address for direct access.
Bit16u *hostAddr = (Bit16u *) BX_CPU_THIS_PTR address_xlation.pages;
WriteHostWordToLittleEndian(hostAddr, val16);
BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID,
BX_CPU_THIS_PTR address_xlation.paddress1, 2, BX_WRITE, (Bit8u*) &val16);
}
else if (BX_CPU_THIS_PTR address_xlation.pages == 1) {
BX_MEM(0)->writePhysicalPage(BX_CPU_THIS,
BX_CPU_THIS_PTR address_xlation.paddress1, 2, &val16);
BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID,
BX_CPU_THIS_PTR address_xlation.paddress1, 2, BX_WRITE, (Bit8u*) &val16);
}
else {
#ifdef BX_LITTLE_ENDIAN
BX_MEM(0)->writePhysicalPage(BX_CPU_THIS,
BX_CPU_THIS_PTR address_xlation.paddress1, 1, &val16);
BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID,
BX_CPU_THIS_PTR address_xlation.paddress1, 1, BX_WRITE, (Bit8u*) &val16);
BX_MEM(0)->writePhysicalPage(BX_CPU_THIS,
BX_CPU_THIS_PTR address_xlation.paddress2, 1, ((Bit8u *) &val16) + 1);
BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID,
BX_CPU_THIS_PTR address_xlation.paddress2, 1, BX_WRITE, ((Bit8u*) &val16)+1);
#else
BX_MEM(0)->writePhysicalPage(BX_CPU_THIS,
BX_CPU_THIS_PTR address_xlation.paddress1, 1, ((Bit8u *) &val16) + 1);
BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID,
BX_CPU_THIS_PTR address_xlation.paddress1, 1, BX_WRITE, ((Bit8u*) &val16)+1);
BX_MEM(0)->writePhysicalPage(BX_CPU_THIS,
BX_CPU_THIS_PTR address_xlation.paddress2, 1, &val16);
BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID,
BX_CPU_THIS_PTR address_xlation.paddress2, 1, BX_WRITE, (Bit8u*) &val16);
#endif
}
}
void BX_CPP_AttrRegparmN(1)
BX_CPU_C::write_RMW_virtual_dword(Bit32u val32)
{
if (BX_CPU_THIS_PTR address_xlation.pages > 2) {
// Pages > 2 means it stores a host address for direct access.
Bit32u *hostAddr = (Bit32u *) BX_CPU_THIS_PTR address_xlation.pages;
WriteHostDWordToLittleEndian(hostAddr, val32);
BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID,
BX_CPU_THIS_PTR address_xlation.paddress1, 4, BX_WRITE, (Bit8u*) &val32);
}
else if (BX_CPU_THIS_PTR address_xlation.pages == 1) {
BX_MEM(0)->writePhysicalPage(BX_CPU_THIS,
BX_CPU_THIS_PTR address_xlation.paddress1, 4, &val32);
BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID,
BX_CPU_THIS_PTR address_xlation.paddress1, 4, BX_WRITE, (Bit8u*) &val32);
}
else {
#ifdef BX_LITTLE_ENDIAN
BX_MEM(0)->writePhysicalPage(BX_CPU_THIS,
BX_CPU_THIS_PTR address_xlation.paddress1,
BX_CPU_THIS_PTR address_xlation.len1,
&val32);
BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID,
BX_CPU_THIS_PTR address_xlation.paddress1,
BX_CPU_THIS_PTR address_xlation.len1, BX_WRITE, (Bit8u*) &val32);
BX_MEM(0)->writePhysicalPage(BX_CPU_THIS,
BX_CPU_THIS_PTR address_xlation.paddress2,
BX_CPU_THIS_PTR address_xlation.len2,
((Bit8u *) &val32) + BX_CPU_THIS_PTR address_xlation.len1);
BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID,
BX_CPU_THIS_PTR address_xlation.paddress2,
BX_CPU_THIS_PTR address_xlation.len2, BX_WRITE,
((Bit8u *) &val32) + BX_CPU_THIS_PTR address_xlation.len1);
#else
BX_MEM(0)->writePhysicalPage(BX_CPU_THIS,
BX_CPU_THIS_PTR address_xlation.paddress1,
BX_CPU_THIS_PTR address_xlation.len1,
((Bit8u *) &val32) + (4 - BX_CPU_THIS_PTR address_xlation.len1));
BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID,
BX_CPU_THIS_PTR address_xlation.paddress1,
BX_CPU_THIS_PTR address_xlation.len1, BX_WRITE,
((Bit8u *) &val32) + (4 - BX_CPU_THIS_PTR address_xlation.len1));
BX_MEM(0)->writePhysicalPage(BX_CPU_THIS,
BX_CPU_THIS_PTR address_xlation.paddress2,
BX_CPU_THIS_PTR address_xlation.len2,
&val32);
BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID,
BX_CPU_THIS_PTR address_xlation.paddress2,
BX_CPU_THIS_PTR address_xlation.len2, BX_WRITE, (Bit8u*) &val32);
#endif
}
}
void BX_CPP_AttrRegparmN(1)
BX_CPU_C::write_RMW_virtual_qword(Bit64u val64)
{
if (BX_CPU_THIS_PTR address_xlation.pages > 2) {
// Pages > 2 means it stores a host address for direct access.
Bit64u *hostAddr = (Bit64u *) BX_CPU_THIS_PTR address_xlation.pages;
WriteHostQWordToLittleEndian(hostAddr, val64);
BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID,
BX_CPU_THIS_PTR address_xlation.paddress1, 8, BX_WRITE, (Bit8u*) &val64);
}
else if (BX_CPU_THIS_PTR address_xlation.pages == 1) {
BX_MEM(0)->writePhysicalPage(BX_CPU_THIS,
BX_CPU_THIS_PTR address_xlation.paddress1, 8, &val64);
BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID,
BX_CPU_THIS_PTR address_xlation.paddress1, 8, BX_WRITE, (Bit8u*) &val64);
}
else {
#ifdef BX_LITTLE_ENDIAN
BX_MEM(0)->writePhysicalPage(BX_CPU_THIS,
BX_CPU_THIS_PTR address_xlation.paddress1,
BX_CPU_THIS_PTR address_xlation.len1,
&val64);
BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID,
BX_CPU_THIS_PTR address_xlation.paddress1,
BX_CPU_THIS_PTR address_xlation.len1, BX_WRITE, (Bit8u*) &val64);
BX_MEM(0)->writePhysicalPage(BX_CPU_THIS,
BX_CPU_THIS_PTR address_xlation.paddress2,
BX_CPU_THIS_PTR address_xlation.len2,
((Bit8u *) &val64) + BX_CPU_THIS_PTR address_xlation.len1);
BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID,
BX_CPU_THIS_PTR address_xlation.paddress2,
BX_CPU_THIS_PTR address_xlation.len2, BX_WRITE,
((Bit8u *) &val64) + BX_CPU_THIS_PTR address_xlation.len1);
#else
BX_MEM(0)->writePhysicalPage(BX_CPU_THIS,
BX_CPU_THIS_PTR address_xlation.paddress1,
BX_CPU_THIS_PTR address_xlation.len1,
((Bit8u *) &val64) + (8 - BX_CPU_THIS_PTR address_xlation.len1));
BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID,
BX_CPU_THIS_PTR address_xlation.paddress1,
BX_CPU_THIS_PTR address_xlation.len1, BX_WRITE,
((Bit8u *) &val64) + (8 - BX_CPU_THIS_PTR address_xlation.len1));
BX_MEM(0)->writePhysicalPage(BX_CPU_THIS,
BX_CPU_THIS_PTR address_xlation.paddress2,
BX_CPU_THIS_PTR address_xlation.len2,
&val64);
BX_DBG_PHY_MEMORY_ACCESS(BX_CPU_ID,
BX_CPU_THIS_PTR address_xlation.paddress2,
BX_CPU_THIS_PTR address_xlation.len2, BX_WRITE, (Bit8u*) &val64);
#endif
}
}
//
// Write data to a new stack; these methods are required for emulation
// correctness but are not performance critical.
//
// assuming the write happens in legacy mode
void BX_CPU_C::write_new_stack_word_32(bx_segment_reg_t *seg, Bit32u offset, unsigned curr_pl, Bit16u data)
{
Bit32u laddr;
BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);
if (seg->cache.valid & SegAccessWOK4G) {
accessOK:
laddr = (Bit32u)(seg->cache.u.segment.base + offset);
#if BX_SupportGuest2HostTLB
unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 1);
Bit32u lpf = AlignedAccessLPFOf32(laddr, 1);
bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
if (tlbEntry->lpf == lpf) {
// See if the TLB entry privilege level allows us write access
// from this CPL.
if (tlbEntry->accessBits & (0x10 << CPL)) {
bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
Bit32u pageOffset = PAGE_OFFSET(laddr);
BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 2, BX_WRITE);
BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
tlbEntry->ppf | pageOffset, 2, curr_pl, BX_WRITE, (Bit8u*) &data);
Bit16u *hostAddr = (Bit16u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
WriteHostWordToLittleEndian(hostAddr, data);
return;
}
}
#endif
#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
if (BX_CPU_THIS_PTR alignment_check() && curr_pl == 3) {
if (laddr & 1) {
BX_ERROR(("write_new_stack_word_32(): #AC misaligned access"));
exception(BX_AC_EXCEPTION, 0, 0);
}
}
#endif
access_write_linear(laddr, 2, curr_pl, (void *) &data);
return;
}
if (seg->cache.valid & SegAccessWOK) {
if (offset < seg->cache.u.segment.limit_scaled)
goto accessOK;
}
write_virtual_checks(seg, offset, 2);
goto accessOK;
}
// assuming the write happens in legacy mode
void BX_CPU_C::write_new_stack_dword_32(bx_segment_reg_t *seg, Bit32u offset, unsigned curr_pl, Bit32u data)
{
Bit32u laddr;
BX_ASSERT(BX_CPU_THIS_PTR cpu_mode != BX_MODE_LONG_64);
if (seg->cache.valid & SegAccessWOK4G) {
accessOK:
laddr = (Bit32u)(seg->cache.u.segment.base + offset);
#if BX_SupportGuest2HostTLB
unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 3);
Bit32u lpf = AlignedAccessLPFOf32(laddr, 3);
bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
if (tlbEntry->lpf == lpf) {
// See if the TLB entry privilege level allows us write access
// from this CPL.
if (tlbEntry->accessBits & (0x10 << CPL)) {
bx_hostpageaddr_t hostPageAddr = tlbEntry->hostPageAddr;
Bit32u pageOffset = PAGE_OFFSET(laddr);
BX_INSTR_LIN_ACCESS(BX_CPU_ID, laddr, tlbEntry->ppf | pageOffset, 4, BX_WRITE);
BX_DBG_LIN_MEMORY_ACCESS(BX_CPU_ID, laddr,
tlbEntry->ppf | pageOffset, 4, curr_pl, BX_WRITE, (Bit8u*) &data);
Bit32u *hostAddr = (Bit32u*) (hostPageAddr | pageOffset);
#if BX_SUPPORT_ICACHE
pageWriteStampTable.decWriteStamp(tlbEntry->ppf);
#endif
WriteHostDWordToLittleEndian(hostAddr, data);
return;
}
}
#endif
#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
if (BX_CPU_THIS_PTR alignment_check() && curr_pl == 3) {
if (laddr & 3) {
BX_ERROR(("write_new_stack_dword_32(): #AC misaligned access"));
exception(BX_AC_EXCEPTION, 0, 0);
}
}
#endif
access_write_linear(laddr, 4, curr_pl, (void *) &data);
return;
}
if (seg->cache.valid & SegAccessWOK) {
if (offset < (seg->cache.u.segment.limit_scaled-2))
goto accessOK;
}
write_virtual_checks(seg, offset, 4);
goto accessOK;
}


@ -1,5 +1,5 @@
/////////////////////////////////////////////////////////////////////////
// $Id: access64.cc,v 1.4 2008-05-12 19:19:03 sshwarts Exp $
// $Id: access64.cc,v 1.5 2008-06-12 19:14:39 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2008 Stanislav Shwartsman
@ -33,10 +33,10 @@
// BX_CPU_THIS_PTR alignment_check_mask must be initialized to all'ones if
// alignment check exception is enabled and LPF_MASK if not.
#if BX_CPU_LEVEL >= 4 && BX_SUPPORT_ALIGNMENT_CHECK
#define AlignedAccessLPFOf(laddr, alignment_mask) \
#define AlignedAccessLPFOf64(laddr, alignment_mask) \
((laddr) & (LPF_MASK | (alignment_mask))) & (BX_CPU_THIS_PTR alignment_check_mask)
#else
#define AlignedAccessLPFOf(laddr, alignment_mask) LPFOf(laddr)
#define AlignedAccessLPFOf64(laddr, alignment_mask) LPFOf(laddr)
#endif
void BX_CPP_AttrRegparmN(3)
@ -90,7 +90,7 @@ BX_CPU_C::write_virtual_word_64(unsigned s, Bit64u offset, Bit16u data)
Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
#if BX_SupportGuest2HostTLB
unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 1);
Bit64u lpf = AlignedAccessLPFOf(laddr, 1);
Bit64u lpf = AlignedAccessLPFOf64(laddr, 1);
bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
if (tlbEntry->lpf == lpf) {
// See if the TLB entry privilege level allows us write access
@ -139,7 +139,7 @@ BX_CPU_C::write_virtual_dword_64(unsigned s, Bit64u offset, Bit32u data)
Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
#if BX_SupportGuest2HostTLB
unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 3);
Bit64u lpf = AlignedAccessLPFOf(laddr, 3);
Bit64u lpf = AlignedAccessLPFOf64(laddr, 3);
bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
if (tlbEntry->lpf == lpf) {
// See if the TLB entry privilege level allows us write access
@ -188,7 +188,7 @@ BX_CPU_C::write_virtual_qword_64(unsigned s, Bit64u offset, Bit64u data)
Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
#if BX_SupportGuest2HostTLB
unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 7);
Bit64u lpf = AlignedAccessLPFOf(laddr, 7);
Bit64u lpf = AlignedAccessLPFOf64(laddr, 7);
bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
if (tlbEntry->lpf == lpf) {
// See if the TLB entry privilege level allows us write access
@ -277,7 +277,7 @@ BX_CPU_C::read_virtual_word_64(unsigned s, Bit64u offset)
Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
#if BX_SupportGuest2HostTLB
unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 1);
Bit64u lpf = AlignedAccessLPFOf(laddr, 1);
Bit64u lpf = AlignedAccessLPFOf64(laddr, 1);
bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
if (tlbEntry->lpf == lpf) {
// See if the TLB entry privilege level allows us read access
@ -325,7 +325,7 @@ BX_CPU_C::read_virtual_dword_64(unsigned s, Bit64u offset)
Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
#if BX_SupportGuest2HostTLB
unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 3);
Bit64u lpf = AlignedAccessLPFOf(laddr, 3);
Bit64u lpf = AlignedAccessLPFOf64(laddr, 3);
bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
if (tlbEntry->lpf == lpf) {
// See if the TLB entry privilege level allows us read access
@ -373,7 +373,7 @@ BX_CPU_C::read_virtual_qword_64(unsigned s, Bit64u offset)
Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
#if BX_SupportGuest2HostTLB
unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 7);
Bit64u lpf = AlignedAccessLPFOf(laddr, 7);
Bit64u lpf = AlignedAccessLPFOf64(laddr, 7);
bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
if (tlbEntry->lpf == lpf) {
// See if the TLB entry privilege level allows us read access
@ -468,7 +468,7 @@ BX_CPU_C::read_RMW_virtual_word_64(unsigned s, Bit64u offset)
Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
#if BX_SupportGuest2HostTLB
unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 1);
Bit64u lpf = AlignedAccessLPFOf(laddr, 1);
Bit64u lpf = AlignedAccessLPFOf64(laddr, 1);
bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
if (tlbEntry->lpf == lpf) {
// See if the TLB entry privilege level allows us write access
@ -520,7 +520,7 @@ BX_CPU_C::read_RMW_virtual_dword_64(unsigned s, Bit64u offset)
Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
#if BX_SupportGuest2HostTLB
unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 3);
Bit64u lpf = AlignedAccessLPFOf(laddr, 3);
Bit64u lpf = AlignedAccessLPFOf64(laddr, 3);
bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
if (tlbEntry->lpf == lpf) {
// See if the TLB entry privilege level allows us write access
@ -572,7 +572,7 @@ BX_CPU_C::read_RMW_virtual_qword_64(unsigned s, Bit64u offset)
Bit64u laddr = BX_CPU_THIS_PTR get_laddr64(s, offset);
#if BX_SupportGuest2HostTLB
unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 7);
Bit64u lpf = AlignedAccessLPFOf(laddr, 7);
Bit64u lpf = AlignedAccessLPFOf64(laddr, 7);
bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
if (tlbEntry->lpf == lpf) {
// See if the TLB entry privilege level allows us write access
@ -616,7 +616,7 @@ void BX_CPU_C::write_new_stack_qword_64(Bit64u laddr, unsigned curr_pl, Bit64u d
{
#if BX_SupportGuest2HostTLB
unsigned tlbIndex = BX_TLB_INDEX_OF(laddr, 7);
Bit64u lpf = AlignedAccessLPFOf(laddr, 7);
Bit64u lpf = AlignedAccessLPFOf64(laddr, 7);
bx_TLB_entry *tlbEntry = &BX_CPU_THIS_PTR TLB.entry[tlbIndex];
if (tlbEntry->lpf == lpf) {
// See if the TLB entry privilege level allows us write access


@ -1,5 +1,5 @@
////////////////////////////////////////////////////////////////////////
// $Id: call_far.cc,v 1.37 2008-05-25 15:53:29 sshwarts Exp $
// $Id: call_far.cc,v 1.38 2008-06-12 19:14:39 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2005 Stanislav Shwartsman
@ -334,12 +334,12 @@ BX_CPU_C::call_protected(bxInstruction_c *i, Bit16u cs_raw, bx_address disp)
if (gate_descriptor.type==BX_286_CALL_GATE) {
for (unsigned i=0; i<param_count; i++) {
parameter_word[i] = read_virtual_word(BX_SEG_REG_SS, return_ESP + i*2);
parameter_word[i] = read_virtual_word_32(BX_SEG_REG_SS, return_ESP + i*2);
}
}
else {
for (unsigned i=0; i<param_count; i++) {
parameter_dword[i] = read_virtual_dword(BX_SEG_REG_SS, return_ESP + i*4);
parameter_dword[i] = read_virtual_dword_32(BX_SEG_REG_SS, return_ESP + i*4);
}
}


@ -1,5 +1,5 @@
/////////////////////////////////////////////////////////////////////////
// $Id: cpu.h,v 1.485 2008-06-12 16:40:52 sshwarts Exp $
// $Id: cpu.h,v 1.486 2008-06-12 19:14:39 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
// Copyright (C) 2001 MandrakeSoft S.A.
@ -2813,32 +2813,45 @@ public: // for now...
BX_CPU_THIS_PTR eipPageWindowSize = 0;
}
BX_SMF void write_virtual_checks(bx_segment_reg_t *seg, bx_address offset, unsigned len) BX_CPP_AttrRegparmN(3);
BX_SMF void read_virtual_checks(bx_segment_reg_t *seg, bx_address offset, unsigned len) BX_CPP_AttrRegparmN(3);
BX_SMF void execute_virtual_checks(bx_segment_reg_t *seg, bx_address offset, unsigned len) BX_CPP_AttrRegparmN(3);
BX_SMF void write_virtual_byte(unsigned seg, bx_address offset, Bit8u data) BX_CPP_AttrRegparmN(3);
BX_SMF void write_virtual_word(unsigned seg, bx_address offset, Bit16u data) BX_CPP_AttrRegparmN(3);
BX_SMF void write_virtual_dword(unsigned seg, bx_address offset, Bit32u data) BX_CPP_AttrRegparmN(3);
BX_SMF void write_virtual_qword(unsigned seg, bx_address offset, Bit64u data) BX_CPP_AttrRegparmN(3);
BX_SMF void write_virtual_dqword(unsigned s, bx_address off, Bit8u *data) BX_CPP_AttrRegparmN(3);
BX_SMF void write_virtual_dqword_aligned(unsigned s, bx_address off, Bit8u *data) BX_CPP_AttrRegparmN(3);
#if BX_SUPPORT_FPU
BX_SMF void write_virtual_tword(unsigned seg, bx_address offset, floatx80 *data) BX_CPP_AttrRegparmN(3);
#endif
BX_SMF Bit8u read_virtual_byte(unsigned seg, bx_address offset) BX_CPP_AttrRegparmN(2);
BX_SMF Bit16u read_virtual_word(unsigned seg, bx_address offset) BX_CPP_AttrRegparmN(2);
BX_SMF Bit32u read_virtual_dword(unsigned seg, bx_address offset) BX_CPP_AttrRegparmN(2);
BX_SMF Bit64u read_virtual_qword(unsigned seg, bx_address offset) BX_CPP_AttrRegparmN(2);
BX_SMF void read_virtual_dqword(unsigned s, bx_address off, Bit8u *data) BX_CPP_AttrRegparmN(3);
BX_SMF void read_virtual_dqword_aligned(unsigned s, bx_address off, Bit8u *data) BX_CPP_AttrRegparmN(3);
#if BX_SUPPORT_FPU
BX_SMF void read_virtual_tword(unsigned seg, bx_address offset, floatx80 *data) BX_CPP_AttrRegparmN(3);
#endif
// write of word/dword to new stack could happen only in legacy mode
BX_SMF void write_new_stack_word_32(bx_segment_reg_t *seg, bx_address offset, unsigned curr_pl, Bit16u data);
BX_SMF void write_new_stack_dword_32(bx_segment_reg_t *seg, bx_address offset, unsigned curr_pl, Bit32u data);
BX_SMF void write_virtual_checks(bx_segment_reg_t *seg, Bit32u offset, unsigned len) BX_CPP_AttrRegparmN(3);
BX_SMF void read_virtual_checks(bx_segment_reg_t *seg, Bit32u offset, unsigned len) BX_CPP_AttrRegparmN(3);
BX_SMF void execute_virtual_checks(bx_segment_reg_t *seg, Bit32u offset, unsigned len) BX_CPP_AttrRegparmN(3);
BX_SMF Bit8u read_virtual_byte_32(unsigned seg, Bit32u offset) BX_CPP_AttrRegparmN(2);
BX_SMF Bit16u read_virtual_word_32(unsigned seg, Bit32u offset) BX_CPP_AttrRegparmN(2);
BX_SMF Bit32u read_virtual_dword_32(unsigned seg, Bit32u offset) BX_CPP_AttrRegparmN(2);
BX_SMF Bit64u read_virtual_qword_32(unsigned seg, Bit32u offset) BX_CPP_AttrRegparmN(2);
BX_SMF void write_virtual_byte_32(unsigned seg, Bit32u offset, Bit8u data) BX_CPP_AttrRegparmN(3);
BX_SMF void write_virtual_word_32(unsigned seg, Bit32u offset, Bit16u data) BX_CPP_AttrRegparmN(3);
BX_SMF void write_virtual_dword_32(unsigned seg, Bit32u offset, Bit32u data) BX_CPP_AttrRegparmN(3);
BX_SMF void write_virtual_qword_32(unsigned seg, Bit32u offset, Bit64u data) BX_CPP_AttrRegparmN(3);
BX_SMF Bit8u read_RMW_virtual_byte_32(unsigned seg, Bit32u offset) BX_CPP_AttrRegparmN(2);
BX_SMF Bit16u read_RMW_virtual_word_32(unsigned seg, Bit32u offset) BX_CPP_AttrRegparmN(2);
BX_SMF Bit32u read_RMW_virtual_dword_32(unsigned seg, Bit32u offset) BX_CPP_AttrRegparmN(2);
BX_SMF Bit64u read_RMW_virtual_qword_32(unsigned seg, Bit32u offset) BX_CPP_AttrRegparmN(2);
BX_SMF void write_RMW_virtual_byte(Bit8u val8) BX_CPP_AttrRegparmN(1);
BX_SMF void write_RMW_virtual_word(Bit16u val16) BX_CPP_AttrRegparmN(1);
BX_SMF void write_RMW_virtual_dword(Bit32u val32) BX_CPP_AttrRegparmN(1);
BX_SMF void write_RMW_virtual_qword(Bit64u val64) BX_CPP_AttrRegparmN(1);
#if BX_SUPPORT_X86_64
BX_SMF void write_new_stack_qword_64(Bit64u offset, unsigned curr_pl, Bit64u data);
BX_SMF void write_virtual_byte_64(unsigned seg, Bit64u offset, Bit8u data) BX_CPP_AttrRegparmN(3);
BX_SMF void write_virtual_word_64(unsigned seg, Bit64u offset, Bit16u data) BX_CPP_AttrRegparmN(3);
BX_SMF void write_virtual_dword_64(unsigned seg, Bit64u offset, Bit32u data) BX_CPP_AttrRegparmN(3);
BX_SMF void write_virtual_qword_64(unsigned seg, Bit64u offset, Bit64u data) BX_CPP_AttrRegparmN(3);
BX_SMF Bit8u read_virtual_byte_64(unsigned seg, Bit64u offset) BX_CPP_AttrRegparmN(2);
BX_SMF Bit16u read_virtual_word_64(unsigned seg, Bit64u offset) BX_CPP_AttrRegparmN(2);
BX_SMF Bit32u read_virtual_dword_64(unsigned seg, Bit64u offset) BX_CPP_AttrRegparmN(2);
BX_SMF Bit64u read_virtual_qword_64(unsigned seg, Bit64u offset) BX_CPP_AttrRegparmN(2);
BX_SMF Bit8u read_RMW_virtual_byte_64(unsigned seg, Bit64u offset) BX_CPP_AttrRegparmN(2);
BX_SMF Bit16u read_RMW_virtual_word_64(unsigned seg, Bit64u offset) BX_CPP_AttrRegparmN(2);
BX_SMF Bit32u read_RMW_virtual_dword_64(unsigned seg, Bit64u offset) BX_CPP_AttrRegparmN(2);
BX_SMF Bit64u read_RMW_virtual_qword_64(unsigned seg, Bit64u offset) BX_CPP_AttrRegparmN(2);
#endif
#if BX_SUPPORT_MISALIGNED_SSE
@ -2856,30 +2869,82 @@ public: // for now...
#endif
BX_SMF Bit8u read_RMW_virtual_byte(unsigned seg, bx_address offset) BX_CPP_AttrRegparmN(2);
BX_SMF Bit16u read_RMW_virtual_word(unsigned seg, bx_address offset) BX_CPP_AttrRegparmN(2);
BX_SMF Bit32u read_RMW_virtual_dword(unsigned seg, bx_address offset) BX_CPP_AttrRegparmN(2);
BX_SMF Bit64u read_RMW_virtual_qword(unsigned seg, bx_address offset) BX_CPP_AttrRegparmN(2);
BX_SMF void write_RMW_virtual_byte(Bit8u val8) BX_CPP_AttrRegparmN(1);
BX_SMF void write_RMW_virtual_word(Bit16u val16) BX_CPP_AttrRegparmN(1);
BX_SMF void write_RMW_virtual_dword(Bit32u val32) BX_CPP_AttrRegparmN(1);
BX_SMF void write_RMW_virtual_qword(Bit64u val64) BX_CPP_AttrRegparmN(1);
BX_SMF void read_virtual_dqword(unsigned s, bx_address off, Bit8u *data) BX_CPP_AttrRegparmN(3);
BX_SMF void read_virtual_dqword_aligned(unsigned s, bx_address off, Bit8u *data) BX_CPP_AttrRegparmN(3);
BX_SMF void write_virtual_dqword(unsigned s, bx_address off, Bit8u *data) BX_CPP_AttrRegparmN(3);
BX_SMF void write_virtual_dqword_aligned(unsigned s, bx_address off, Bit8u *data) BX_CPP_AttrRegparmN(3);
// write of word/dword to new stack could happen only in legacy mode
BX_SMF void write_new_stack_word_32(bx_segment_reg_t *seg, Bit32u offset, unsigned curr_pl, Bit16u data);
BX_SMF void write_new_stack_dword_32(bx_segment_reg_t *seg, Bit32u offset, unsigned curr_pl, Bit32u data);
#if BX_SUPPORT_X86_64
BX_SMF void write_virtual_byte_64(unsigned seg, Bit64u offset, Bit8u data) BX_CPP_AttrRegparmN(3);
BX_SMF void write_virtual_word_64(unsigned seg, Bit64u offset, Bit16u data) BX_CPP_AttrRegparmN(3);
BX_SMF void write_virtual_dword_64(unsigned seg, Bit64u offset, Bit32u data) BX_CPP_AttrRegparmN(3);
BX_SMF void write_virtual_qword_64(unsigned seg, Bit64u offset, Bit64u data) BX_CPP_AttrRegparmN(3);
BX_SMF Bit8u read_virtual_byte_64(unsigned seg, Bit64u offset) BX_CPP_AttrRegparmN(2);
BX_SMF Bit16u read_virtual_word_64(unsigned seg, Bit64u offset) BX_CPP_AttrRegparmN(2);
BX_SMF Bit32u read_virtual_dword_64(unsigned seg, Bit64u offset) BX_CPP_AttrRegparmN(2);
BX_SMF Bit64u read_virtual_qword_64(unsigned seg, Bit64u offset) BX_CPP_AttrRegparmN(2);
BX_SMF Bit8u read_RMW_virtual_byte_64(unsigned seg, Bit64u offset) BX_CPP_AttrRegparmN(2);
BX_SMF Bit16u read_RMW_virtual_word_64(unsigned seg, Bit64u offset) BX_CPP_AttrRegparmN(2);
BX_SMF Bit32u read_RMW_virtual_dword_64(unsigned seg, Bit64u offset) BX_CPP_AttrRegparmN(2);
BX_SMF Bit64u read_RMW_virtual_qword_64(unsigned seg, Bit64u offset) BX_CPP_AttrRegparmN(2);
BX_SMF void write_new_stack_qword_64(Bit64u offset, unsigned curr_pl, Bit64u data);
#endif
// write
#define write_virtual_byte(seg, offset, data) \
(BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64) ? \
write_virtual_byte_64(seg, offset, data) : \
write_virtual_byte_32(seg, offset, data)
#define write_virtual_word(seg, offset, data) \
(BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64) ? \
write_virtual_word_64(seg, offset, data) : \
write_virtual_word_32(seg, offset, data)
#define write_virtual_dword(seg, offset, data) \
(BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64) ? \
write_virtual_dword_64(seg, offset, data) : \
write_virtual_dword_32(seg, offset, data)
#define write_virtual_qword(seg, offset, data) \
(BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64) ? \
write_virtual_qword_64(seg, offset, data) : \
write_virtual_qword_32(seg, offset, data)
// read
#define read_virtual_byte(seg, offset) \
(BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64) ? \
read_virtual_byte_64(seg, offset) : \
read_virtual_byte_32(seg, offset)
#define read_virtual_word(seg, offset) \
(BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64) ? \
read_virtual_word_64(seg, offset) : \
read_virtual_word_32(seg, offset)
#define read_virtual_dword(seg, offset) \
(BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64) ? \
read_virtual_dword_64(seg, offset) : \
read_virtual_dword_32(seg, offset)
#define read_virtual_qword(seg, offset) \
(BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64) ? \
read_virtual_qword_64(seg, offset) : \
read_virtual_qword_32(seg, offset)
// RMW
#define read_RMW_virtual_byte(seg, offset) \
(BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64) ? \
read_RMW_virtual_byte_64(seg, offset) : \
read_RMW_virtual_byte_32(seg, offset)
#define read_RMW_virtual_word(seg, offset) \
(BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64) ? \
read_RMW_virtual_word_64(seg, offset) : \
read_RMW_virtual_word_32(seg, offset)
#define read_RMW_virtual_dword(seg, offset) \
(BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64) ? \
read_RMW_virtual_dword_64(seg, offset) : \
read_RMW_virtual_dword_32(seg, offset)
#define read_RMW_virtual_qword(seg, offset) \
(BX_CPU_THIS_PTR cpu_mode == BX_MODE_LONG_64) ? \
read_RMW_virtual_qword_64(seg, offset) : \
read_RMW_virtual_qword_32(seg, offset)
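// Illustration only (not part of the header): a handler written against the
// generic names now dispatches on cpu_mode at the read and commits through
// the shared RMW writer, e.g.
//
//   Bit32u op = read_RMW_virtual_dword(i->seg(), eaddr); // expands to _64 or _32
//   write_RMW_virtual_dword(op + 1);                     // reuses the saved translation
//
// (eaddr stands for whatever effective address the handler computed.)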
#if BX_SupportGuest2HostTLB
BX_SMF Bit8u* v2h_read_byte(bx_address laddr, unsigned curr_pl) BX_CPP_AttrRegparmN(2);
BX_SMF Bit8u* v2h_read(bx_address laddr, unsigned curr_pl, unsigned len) BX_CPP_AttrRegparmN(3);


@ -1,5 +1,5 @@
/////////////////////////////////////////////////////////////////////////
// $Id: io.cc,v 1.59 2008-05-03 17:33:30 sshwarts Exp $
// $Id: io.cc,v 1.60 2008-06-12 19:14:39 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
// Copyright (C) 2001 MandrakeSoft S.A.
@ -268,12 +268,12 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::INSB_YbDX(bxInstruction_c *i)
if (i->as64L()) {
// Write a zero to memory, to trigger any segment or page
// faults before reading from IO port.
write_virtual_byte(BX_SEG_REG_ES, RDI, value8);
write_virtual_byte_64(BX_SEG_REG_ES, RDI, value8);
value8 = BX_INP(DX, 1);
/* no seg override possible */
write_virtual_byte(BX_SEG_REG_ES, RDI, value8);
write_virtual_byte_64(BX_SEG_REG_ES, RDI, value8);
if (BX_CPU_THIS_PTR get_DF())
RDI--;
@ -299,15 +299,15 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::INSB_YbDX(bxInstruction_c *i)
RDI = EDI + 1;
}
}
else {
else { // 16-bit address size
// Write a zero to memory, to trigger any segment or page
// faults before reading from IO port.
write_virtual_byte(BX_SEG_REG_ES, DI, value8);
write_virtual_byte_32(BX_SEG_REG_ES, DI, value8);
value8 = BX_INP(DX, 1);
/* no seg override possible */
write_virtual_byte(BX_SEG_REG_ES, DI, value8);
write_virtual_byte_32(BX_SEG_REG_ES, DI, value8);
if (BX_CPU_THIS_PTR get_DF())
DI--;
@ -327,12 +327,12 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::INSW_YwDX(bxInstruction_c *i)
// Write a zero to memory, to trigger any segment or page
// faults before reading from IO port.
write_virtual_word(BX_SEG_REG_ES, rdi, value16);
write_virtual_word_64(BX_SEG_REG_ES, rdi, value16);
value16 = BX_INP(DX, 2);
/* no seg override allowed */
write_virtual_word(BX_SEG_REG_ES, rdi, value16);
write_virtual_word_64(BX_SEG_REG_ES, rdi, value16);
if (BX_CPU_THIS_PTR get_DF())
rdi -= 2;
@ -429,12 +429,12 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::INSD_YdDX(bxInstruction_c *i)
// Write a zero to memory, to trigger any segment or page
// faults before reading from IO port.
write_virtual_dword(BX_SEG_REG_ES, rdi, 0);
write_virtual_dword_64(BX_SEG_REG_ES, rdi, 0);
Bit32u value32 = BX_INP(DX, 4);
/* no seg override allowed */
write_virtual_dword(BX_SEG_REG_ES, rdi, value32);
write_virtual_dword_64(BX_SEG_REG_ES, rdi, value32);
if (BX_CPU_THIS_PTR get_DF())
rdi -= 4;
@ -464,17 +464,17 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::INSD_YdDX(bxInstruction_c *i)
RDI = edi;
}
else {
else { // 16-bit address size
Bit16u di = DI;
// Write a zero to memory, to trigger any segment or page
// faults before reading from IO port.
write_virtual_dword(BX_SEG_REG_ES, di, 0);
write_virtual_dword_32(BX_SEG_REG_ES, di, 0);
Bit32u value32 = BX_INP(DX, 4);
/* no seg override allowed */
write_virtual_dword(BX_SEG_REG_ES, di, value32);
write_virtual_dword_32(BX_SEG_REG_ES, di, value32);
if (BX_CPU_THIS_PTR get_DF())
di -= 4;
@ -536,7 +536,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::OUTSB_DXXb(bxInstruction_c *i)
if (i->as64L()) {
Bit64u rsi = RSI;
value8 = read_virtual_byte(i->seg(), rsi);
value8 = read_virtual_byte_64(i->seg(), rsi);
BX_OUTP(DX, value8, 1);
if (BX_CPU_THIS_PTR get_DF())
@ -561,10 +561,10 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::OUTSB_DXXb(bxInstruction_c *i)
RSI = esi;
}
else {
else { // address size 16-bit
Bit16u si = SI;
value8 = read_virtual_byte(i->seg(), si);
value8 = read_virtual_byte_32(i->seg(), si);
BX_OUTP(DX, value8, 1);
if (BX_CPU_THIS_PTR get_DF())
@ -590,7 +590,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::OUTSW_DXXw(bxInstruction_c *i)
if (i->as64L()) {
Bit64u rsi = RSI;
value16 = read_virtual_word(i->seg(), rsi);
value16 = read_virtual_word_64(i->seg(), rsi);
BX_OUTP(DX, value16, 2);
if (BX_CPU_THIS_PTR get_DF())
@ -676,7 +676,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::OUTSD_DXXd(bxInstruction_c *i)
if (i->as64L()) {
Bit64u rsi = RSI;
value32 = read_virtual_dword(i->seg(), rsi);
value32 = read_virtual_dword_64(i->seg(), rsi);
BX_OUTP(DX, value32, 4);
if (BX_CPU_THIS_PTR get_DF())
@ -701,10 +701,10 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::OUTSD_DXXd(bxInstruction_c *i)
RSI = esi;
}
else {
else { // address size 16-bit
Bit16u si = SI;
value32 = read_virtual_dword(i->seg(), si);
value32 = read_virtual_dword_32(i->seg(), si);
BX_OUTP(DX, value32, 4);
if (BX_CPU_THIS_PTR get_DF())


@ -1,5 +1,5 @@
/////////////////////////////////////////////////////////////////////////
// $Id: io_pro.cc,v 1.33 2008-05-26 18:02:07 sshwarts Exp $
// $Id: io_pro.cc,v 1.34 2008-06-12 19:14:39 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
// Copyright (C) 2001 MandrakeSoft S.A.
@ -127,12 +127,7 @@ bx_bool BX_CPU_C::allow_io(Bit16u addr, unsigned len)
access_read_linear(BX_CPU_THIS_PTR tr.cache.u.system.base + 102,
2, 0, BX_READ, &io_base);
/*
if (io_base <= 103) {
BX_ERROR(("allow_io(): TR:io_base (%u) <= 103", io_base));
return(0);
}
*/
if ((io_base + addr/8) >= BX_CPU_THIS_PTR tr.cache.u.system.limit_scaled) {
BX_ERROR(("allow_io(): IO addr %x (len %d) outside TSS IO permission map (base=%x, limit=%x) #GP(0)",
addr, len, io_base, BX_CPU_THIS_PTR tr.cache.u.system.limit_scaled));


@ -1,5 +1,5 @@
////////////////////////////////////////////////////////////////////////
// $Id: iret.cc,v 1.37 2008-05-23 13:46:52 sshwarts Exp $
// $Id: iret.cc,v 1.38 2008-06-12 19:14:39 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2005 Stanislav Shwartsman
@ -137,9 +137,9 @@ BX_CPU_C::iret_protected(bxInstruction_c *i)
temp_ESP = SP;
if (i->os32L()) {
new_eflags = read_virtual_dword(BX_SEG_REG_SS, temp_ESP + 8);
raw_cs_selector = (Bit16u) read_virtual_dword(BX_SEG_REG_SS, temp_ESP + 4);
new_eip = read_virtual_dword(BX_SEG_REG_SS, temp_ESP + 0);
new_eflags = read_virtual_dword_32(BX_SEG_REG_SS, temp_ESP + 8);
raw_cs_selector = (Bit16u) read_virtual_dword_32(BX_SEG_REG_SS, temp_ESP + 4);
new_eip = read_virtual_dword_32(BX_SEG_REG_SS, temp_ESP + 0);
// if VM=1 in flags image on stack then STACK_RETURN_TO_V86
if (new_eflags & EFlagsVMMask) {
@ -151,9 +151,9 @@ BX_CPU_C::iret_protected(bxInstruction_c *i)
}
}
else {
new_flags = read_virtual_word(BX_SEG_REG_SS, temp_ESP + 4);
raw_cs_selector = read_virtual_word(BX_SEG_REG_SS, temp_ESP + 2);
new_ip = read_virtual_word(BX_SEG_REG_SS, temp_ESP + 0);
new_flags = read_virtual_word_32(BX_SEG_REG_SS, temp_ESP + 4);
raw_cs_selector = read_virtual_word_32(BX_SEG_REG_SS, temp_ESP + 2);
new_ip = read_virtual_word_32(BX_SEG_REG_SS, temp_ESP + 0);
}
parse_selector(raw_cs_selector, &cs_selector);
@ -228,10 +228,10 @@ BX_CPU_C::iret_protected(bxInstruction_c *i)
/* examine return SS selector and associated descriptor */
if (i->os32L()) {
raw_ss_selector = (Bit16u) read_virtual_dword(BX_SEG_REG_SS, temp_ESP + 16);
raw_ss_selector = (Bit16u) read_virtual_dword_32(BX_SEG_REG_SS, temp_ESP + 16);
}
else {
raw_ss_selector = read_virtual_word(BX_SEG_REG_SS, temp_ESP + 8);
raw_ss_selector = read_virtual_word_32(BX_SEG_REG_SS, temp_ESP + 8);
}
/* selector must be non-null, else #GP(0) */
@ -279,14 +279,14 @@ BX_CPU_C::iret_protected(bxInstruction_c *i)
}
if (i->os32L()) {
new_esp = read_virtual_dword(BX_SEG_REG_SS, temp_ESP + 12);
new_eflags = read_virtual_dword(BX_SEG_REG_SS, temp_ESP + 8);
new_eip = read_virtual_dword(BX_SEG_REG_SS, temp_ESP + 0);
new_esp = read_virtual_dword_32(BX_SEG_REG_SS, temp_ESP + 12);
new_eflags = read_virtual_dword_32(BX_SEG_REG_SS, temp_ESP + 8);
new_eip = read_virtual_dword_32(BX_SEG_REG_SS, temp_ESP + 0);
}
else {
new_esp = read_virtual_word(BX_SEG_REG_SS, temp_ESP + 6);
new_eflags = read_virtual_word(BX_SEG_REG_SS, temp_ESP + 4);
new_eip = read_virtual_word(BX_SEG_REG_SS, temp_ESP + 0);
new_esp = read_virtual_word_32(BX_SEG_REG_SS, temp_ESP + 6);
new_eflags = read_virtual_word_32(BX_SEG_REG_SS, temp_ESP + 4);
new_eip = read_virtual_word_32(BX_SEG_REG_SS, temp_ESP + 0);
}
// ID,VIP,VIF,AC,VM,RF,x,NT,IOPL,OF,DF,IF,TF,SF,ZF,x,AF,x,PF,x,CF
@ -371,15 +371,15 @@ BX_CPU_C::long_iret(bxInstruction_c *i)
else
#endif
if (i->os32L()) {
new_eflags = read_virtual_dword(BX_SEG_REG_SS, temp_RSP + 8);
raw_cs_selector = (Bit16u) read_virtual_dword(BX_SEG_REG_SS, temp_RSP + 4);
new_rip = (Bit64u) read_virtual_dword(BX_SEG_REG_SS, temp_RSP + 0);
new_eflags = read_virtual_dword_32(BX_SEG_REG_SS, temp_RSP + 8);
raw_cs_selector = (Bit16u) read_virtual_dword_32(BX_SEG_REG_SS, temp_RSP + 4);
new_rip = (Bit64u) read_virtual_dword_32(BX_SEG_REG_SS, temp_RSP + 0);
top_nbytes_same = 12;
}
else {
new_eflags = read_virtual_word(BX_SEG_REG_SS, temp_RSP + 4);
raw_cs_selector = read_virtual_word(BX_SEG_REG_SS, temp_RSP + 2);
new_rip = (Bit64u) read_virtual_word(BX_SEG_REG_SS, temp_RSP + 0);
new_eflags = read_virtual_word_32(BX_SEG_REG_SS, temp_RSP + 4);
raw_cs_selector = read_virtual_word_32(BX_SEG_REG_SS, temp_RSP + 2);
new_rip = (Bit64u) read_virtual_word_32(BX_SEG_REG_SS, temp_RSP + 0);
top_nbytes_same = 6;
}
@ -460,12 +460,12 @@ BX_CPU_C::long_iret(bxInstruction_c *i)
#endif
{
if (i->os32L()) {
raw_ss_selector = (Bit16u) read_virtual_dword(BX_SEG_REG_SS, temp_RSP + 16);
new_rsp = (Bit64u) read_virtual_dword(BX_SEG_REG_SS, temp_RSP + 12);
raw_ss_selector = (Bit16u) read_virtual_dword_32(BX_SEG_REG_SS, temp_RSP + 16);
new_rsp = (Bit64u) read_virtual_dword_32(BX_SEG_REG_SS, temp_RSP + 12);
}
else {
raw_ss_selector = read_virtual_word(BX_SEG_REG_SS, temp_RSP + 8);
new_rsp = (Bit64u) read_virtual_word(BX_SEG_REG_SS, temp_RSP + 6);
raw_ss_selector = read_virtual_word_32(BX_SEG_REG_SS, temp_RSP + 8);
new_rsp = (Bit64u) read_virtual_word_32(BX_SEG_REG_SS, temp_RSP + 6);
}
}
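
For reference, the offsets used by these reads follow the IRET return-frame layout; a compact summary as C++ constants (illustrative names only, not Bochs definitions):

#include <cstdint>

// Byte offsets (from the stack pointer) behind the reads above.
namespace iret_frame {
  // 32-bit operand size: EIP, CS, EFLAGS, plus ESP/SS for an outer-level return.
  enum Frame32 : uint32_t { kEIP = 0, kCS32 = 4, kEFLAGS = 8, kESP = 12, kSS32 = 16 };
  // 16-bit operand size: IP, CS, FLAGS, plus SP/SS for the outer-level return.
  enum Frame16 : uint32_t { kIP = 0, kCS16 = 2, kFLAGS = 4, kSP = 6, kSS16 = 8 };
}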


@ -1,5 +1,5 @@
/////////////////////////////////////////////////////////////////////////
// $Id: proc_ctrl.cc,v 1.238 2008-06-02 19:50:40 sshwarts Exp $
// $Id: proc_ctrl.cc,v 1.239 2008-06-12 19:14:39 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
// Copyright (C) 2001 MandrakeSoft S.A.
@ -1939,7 +1939,10 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::MONITOR(bxInstruction_c *i)
offset = AX;
}
read_virtual_checks(&BX_CPU_THIS_PTR sregs[i->seg()], offset, 1);
// check if we could access the memory segment
if (!(seg->cache.valid & SegAccessROK4G)) {
read_virtual_checks(&BX_CPU_THIS_PTR sregs[i->seg()], offset, 1);
}
// set MONITOR
laddr = BX_CPU_THIS_PTR get_laddr(i->seg(), offset);
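
The added guard is the recurring fast path of this commit: when the segment's cached descriptor already guarantees read access at any 32-bit offset, the per-access read_virtual_checks call can be skipped. A standalone sketch of the idea — SegCache, kReadOK4G and full_read_checks are illustrative stand-ins, not the Bochs definitions:

#include <cstdint>
#include <stdexcept>

struct SegCache {
  uint32_t valid;    // cached descriptor properties
  uint32_t base;
  uint32_t limit;
};
const uint32_t kReadOK4G = 0x08;   // "reads allowed over the whole 0..4G-1 range"

void full_read_checks(const SegCache &seg, uint32_t offset, unsigned len)
{
  // stands in for read_virtual_checks(): type, present and limit validation
  if ((uint64_t)offset + len - 1 > seg.limit)
    throw std::runtime_error("#GP: operand outside segment limit");
}

uint32_t get_read_laddr(const SegCache &seg, uint32_t offset, unsigned len)
{
  if (!(seg.valid & kReadOK4G))    // fast path: skip the checks entirely
    full_read_checks(seg, offset, len);
  return seg.base + offset;        // 32-bit wrap-around is intentional
}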


@ -1,5 +1,5 @@
/////////////////////////////////////////////////////////////////////////
// $Id: protect_ctrl.cc,v 1.87 2008-05-26 21:51:46 sshwarts Exp $
// $Id: protect_ctrl.cc,v 1.88 2008-06-12 19:14:39 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
// Copyright (C) 2001 MandrakeSoft S.A.
@ -671,8 +671,8 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::SGDT_Ms(bxInstruction_c *i)
BX_CPU_CALL_METHODR(i->ResolveModrm, (i));
write_virtual_word(i->seg(), RMAddr(i), limit_16);
write_virtual_dword(i->seg(), RMAddr(i)+2, base_32);
write_virtual_word_32(i->seg(), RMAddr(i), limit_16);
write_virtual_dword_32(i->seg(), RMAddr(i)+2, base_32);
}
void BX_CPP_AttrRegparmN(1) BX_CPU_C::SIDT_Ms(bxInstruction_c *i)
@ -682,8 +682,8 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::SIDT_Ms(bxInstruction_c *i)
BX_CPU_CALL_METHODR(i->ResolveModrm, (i));
write_virtual_word(i->seg(), RMAddr(i), limit_16);
write_virtual_dword(i->seg(), RMAddr(i)+2, base_32);
write_virtual_word_32(i->seg(), RMAddr(i), limit_16);
write_virtual_dword_32(i->seg(), RMAddr(i)+2, base_32);
}
void BX_CPP_AttrRegparmN(1) BX_CPU_C::LGDT_Ms(bxInstruction_c *i)
@ -702,8 +702,8 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::LGDT_Ms(bxInstruction_c *i)
BX_CPU_CALL_METHODR(i->ResolveModrm, (i));
Bit16u limit_16 = read_virtual_word(i->seg(), RMAddr(i));
Bit32u base_32 = read_virtual_dword(i->seg(), RMAddr(i) + 2);
Bit16u limit_16 = read_virtual_word_32(i->seg(), RMAddr(i));
Bit32u base_32 = read_virtual_dword_32(i->seg(), RMAddr(i) + 2);
if (i->os32L() == 0) base_32 &= 0x00ffffff; /* ignore upper 8 bits */
@ -727,8 +727,8 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::LIDT_Ms(bxInstruction_c *i)
BX_CPU_CALL_METHODR(i->ResolveModrm, (i));
Bit32u base_32 = read_virtual_dword(i->seg(), RMAddr(i) + 2);
Bit16u limit_16 = read_virtual_word(i->seg(), RMAddr(i));
Bit32u base_32 = read_virtual_dword_32(i->seg(), RMAddr(i) + 2);
Bit16u limit_16 = read_virtual_word_32(i->seg(), RMAddr(i));
if (i->os32L() == 0) base_32 &= 0x00ffffff; /* ignore upper 8 bits */
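
These four handlers all touch the same 6-byte pseudo-descriptor operand: a 16-bit limit followed by a 32-bit base at offset +2, with the base truncated to 24 bits when the operand size is 16-bit. A standalone sketch of the decode step (illustrative names; a little-endian buffer is assumed):

#include <cstdint>
#include <cstring>

struct PseudoDescriptor {   // memory operand of LGDT/SGDT (and LIDT/SIDT)
  uint16_t limit;           // bytes 0..1
  uint32_t base;            // bytes 2..5
};

PseudoDescriptor read_pseudo_descriptor(const uint8_t mem[6], bool operand32)
{
  PseudoDescriptor d;
  std::memcpy(&d.limit, mem, 2);
  std::memcpy(&d.base, mem + 2, 4);
  if (!operand32)
    d.base &= 0x00ffffff;   // 16-bit operand size: upper 8 bits of the base are ignored
  return d;
}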


@ -1,5 +1,5 @@
////////////////////////////////////////////////////////////////////////
// $Id: ret_far.cc,v 1.18 2008-05-10 18:10:53 sshwarts Exp $
// $Id: ret_far.cc,v 1.19 2008-06-12 19:14:39 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2005 Stanislav Shwartsman
@ -69,13 +69,13 @@ BX_CPU_C::return_protected(bxInstruction_c *i, Bit16u pop_bytes)
else
#endif
if (i->os32L()) {
raw_cs_selector = (Bit16u) read_virtual_dword(BX_SEG_REG_SS, temp_RSP + 4);
return_RIP = read_virtual_dword(BX_SEG_REG_SS, temp_RSP);
raw_cs_selector = (Bit16u) read_virtual_dword_32(BX_SEG_REG_SS, temp_RSP + 4);
return_RIP = read_virtual_dword_32(BX_SEG_REG_SS, temp_RSP);
stack_param_offset = 8;
}
else {
raw_cs_selector = read_virtual_word(BX_SEG_REG_SS, temp_RSP + 2);
return_RIP = read_virtual_word(BX_SEG_REG_SS, temp_RSP);
raw_cs_selector = read_virtual_word_32(BX_SEG_REG_SS, temp_RSP + 2);
return_RIP = read_virtual_word_32(BX_SEG_REG_SS, temp_RSP);
stack_param_offset = 4;
}
@ -139,18 +139,18 @@ BX_CPU_C::return_protected(bxInstruction_c *i, Bit16u pop_bytes)
#if BX_SUPPORT_X86_64
if (i->os64L()) {
raw_ss_selector = read_virtual_word_64 (BX_SEG_REG_SS, temp_RSP + 24 + pop_bytes);
raw_ss_selector = read_virtual_word_64(BX_SEG_REG_SS, temp_RSP + 24 + pop_bytes);
return_RSP = read_virtual_qword_64(BX_SEG_REG_SS, temp_RSP + 16 + pop_bytes);
}
else
#endif
if (i->os32L()) {
raw_ss_selector = read_virtual_word (BX_SEG_REG_SS, temp_RSP + 12 + pop_bytes);
return_RSP = read_virtual_dword(BX_SEG_REG_SS, temp_RSP + 8 + pop_bytes);
raw_ss_selector = read_virtual_word_32(BX_SEG_REG_SS, temp_RSP + 12 + pop_bytes);
return_RSP = read_virtual_dword_32(BX_SEG_REG_SS, temp_RSP + 8 + pop_bytes);
}
else {
raw_ss_selector = read_virtual_word(BX_SEG_REG_SS, temp_RSP + 6 + pop_bytes);
return_RSP = read_virtual_word(BX_SEG_REG_SS, temp_RSP + 4 + pop_bytes);
raw_ss_selector = read_virtual_word_32(BX_SEG_REG_SS, temp_RSP + 6 + pop_bytes);
return_RSP = read_virtual_word_32(BX_SEG_REG_SS, temp_RSP + 4 + pop_bytes);
}
/* selector index must be within its descriptor table limits,


@ -1,5 +1,5 @@
/////////////////////////////////////////////////////////////////////////
// $Id: sse_move.cc,v 1.90 2008-05-10 18:10:53 sshwarts Exp $
// $Id: sse_move.cc,v 1.91 2008-06-12 19:14:39 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2003 Stanislav Shwartsman
@ -350,7 +350,10 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::FXRSTOR(bxInstruction_c *i)
/* load i387 register file */
for(index=0; index < 8; index++)
{
read_virtual_tword(i->seg(), RMAddr(i)+index*16+32, &(BX_FPU_REG(index)));
floatx80 reg;
reg.fraction = read_virtual_qword(i->seg(), RMAddr(i)+index*16+32);
reg.exp = read_virtual_word (i->seg(), RMAddr(i)+index*16+40);
BX_FPU_REG(index) = reg;
}
BX_CPU_THIS_PTR the_i387.twd = unpack_FPU_TW(tag_byte);
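
This hunk (and the matching XRSTOR, FRSTOR, FNSAVE and FLD/FSTP_EXTENDED_REAL hunks further down) replaces the 80-bit tword accessor with two ordinary loads: the 64-bit significand followed by the 16-bit sign/exponent word, which is the in-memory layout of an x87 extended real. A standalone sketch of the split — the struct mirrors the idea, not the actual Bochs floatx80 definition, and a little-endian host is assumed:

#include <cstdint>
#include <cstring>

struct ExtendedReal {    // x87 80-bit value: 64-bit significand + 16-bit sign/exponent
  uint64_t fraction;
  uint16_t exp;
};

// Load a 10-byte extended real from (little-endian) guest memory.
ExtendedReal load_extended(const uint8_t *mem)
{
  ExtendedReal r;
  std::memcpy(&r.fraction, mem, 8);   // bytes 0..7: significand
  std::memcpy(&r.exp, mem + 8, 2);    // bytes 8..9: sign bit + 15-bit exponent
  return r;
}

// Store it back in the same layout (the FSTP/FNSAVE direction).
void store_extended(uint8_t *mem, const ExtendedReal &r)
{
  std::memcpy(mem, &r.fraction, 8);
  std::memcpy(mem + 8, &r.exp, 2);
}

Only the stride differs between the images: the FXRSTOR/XRSTOR layout spaces registers 16 bytes apart (offsets index*16+32 and +40 above), while the FSAVE/FRSTOR image packs them at 10 bytes (offsets n*10 and +8 in the fpu.cc hunk below).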


@ -1,5 +1,5 @@
/////////////////////////////////////////////////////////////////////////
// $Id: stack.h,v 1.2 2008-05-10 18:10:53 sshwarts Exp $
// $Id: stack.h,v 1.3 2008-06-12 19:14:39 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2007 Stanislav Shwartsman
@ -36,12 +36,12 @@ BX_CPU_C::push_16(Bit16u value16)
else
#endif
if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.d_b) { /* StackAddrSize = 32 */
write_virtual_word(BX_SEG_REG_SS, (Bit32u) (ESP-2), value16);
write_virtual_word_32(BX_SEG_REG_SS, (Bit32u) (ESP-2), value16);
ESP -= 2;
}
else
{
write_virtual_word(BX_SEG_REG_SS, (Bit16u) (SP-2), value16);
write_virtual_word_32(BX_SEG_REG_SS, (Bit16u) (SP-2), value16);
SP -= 2;
}
}
@ -58,12 +58,12 @@ BX_CPU_C::push_32(Bit32u value32)
else
#endif
if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.d_b) { /* StackAddrSize = 32 */
write_virtual_dword(BX_SEG_REG_SS, (Bit32u) (ESP-4), value32);
write_virtual_dword_32(BX_SEG_REG_SS, (Bit32u) (ESP-4), value32);
ESP -= 4;
}
else
{
write_virtual_dword(BX_SEG_REG_SS, (Bit16u) (SP-4), value32);
write_virtual_dword_32(BX_SEG_REG_SS, (Bit16u) (SP-4), value32);
SP -= 4;
}
}
@ -91,11 +91,11 @@ BX_CPP_INLINE Bit16u BX_CPU_C::pop_16(void)
else
#endif
if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.d_b) {
value16 = read_virtual_word(BX_SEG_REG_SS, ESP);
value16 = read_virtual_word_32(BX_SEG_REG_SS, ESP);
ESP += 2;
}
else {
value16 = read_virtual_word(BX_SEG_REG_SS, SP);
value16 = read_virtual_word_32(BX_SEG_REG_SS, SP);
SP += 2;
}
@ -115,11 +115,11 @@ BX_CPP_INLINE Bit32u BX_CPU_C::pop_32(void)
else
#endif
if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.d_b) {
value32 = read_virtual_dword(BX_SEG_REG_SS, ESP);
value32 = read_virtual_dword_32(BX_SEG_REG_SS, ESP);
ESP += 4;
}
else {
value32 = read_virtual_dword(BX_SEG_REG_SS, SP);
value32 = read_virtual_dword_32(BX_SEG_REG_SS, SP);
SP += 4;
}
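
All of these helpers follow the same shape: the SS descriptor's D/B bit selects a 32-bit (ESP) or 16-bit (SP) stack address size, and the access itself now always goes through the 32-bit accessor. A simplified standalone push_32 — not the Bochs implementation; write32 stands in for the real SS-relative write:

#include <cstdint>

struct StackState {
  uint32_t esp;      // full 32-bit stack pointer
  bool     ss_d_b;   // SS descriptor D/B bit: true -> 32-bit stack address size
};

void push_32(StackState &st, uint32_t value, void (*write32)(uint32_t offset, uint32_t data))
{
  if (st.ss_d_b) {                                   // StackAddrSize = 32: use ESP
    write32(st.esp - 4, value);
    st.esp -= 4;
  } else {                                           // StackAddrSize = 16: SP wraps at 64K
    uint16_t new_sp = static_cast<uint16_t>(st.esp) - 4;
    write32(new_sp, value);
    st.esp = (st.esp & 0xffff0000u) | new_sp;        // only SP changes, upper ESP preserved
  }
}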


@ -1,5 +1,5 @@
/////////////////////////////////////////////////////////////////////////
// $Id: stack16.cc,v 1.40 2008-05-08 18:02:21 sshwarts Exp $
// $Id: stack16.cc,v 1.41 2008-06-12 19:14:39 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
// Copyright (C) 2001 MandrakeSoft S.A.
@ -176,26 +176,26 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::PUSHAD16(bxInstruction_c *i)
if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.d_b)
{
write_virtual_word(BX_SEG_REG_SS, (Bit32u)(temp_ESP - 2), AX);
write_virtual_word(BX_SEG_REG_SS, (Bit32u)(temp_ESP - 4), CX);
write_virtual_word(BX_SEG_REG_SS, (Bit32u)(temp_ESP - 6), DX);
write_virtual_word(BX_SEG_REG_SS, (Bit32u)(temp_ESP - 8), BX);
write_virtual_word(BX_SEG_REG_SS, (Bit32u)(temp_ESP - 10), temp_SP);
write_virtual_word(BX_SEG_REG_SS, (Bit32u)(temp_ESP - 12), BP);
write_virtual_word(BX_SEG_REG_SS, (Bit32u)(temp_ESP - 14), SI);
write_virtual_word(BX_SEG_REG_SS, (Bit32u)(temp_ESP - 16), DI);
write_virtual_word_32(BX_SEG_REG_SS, (Bit32u)(temp_ESP - 2), AX);
write_virtual_word_32(BX_SEG_REG_SS, (Bit32u)(temp_ESP - 4), CX);
write_virtual_word_32(BX_SEG_REG_SS, (Bit32u)(temp_ESP - 6), DX);
write_virtual_word_32(BX_SEG_REG_SS, (Bit32u)(temp_ESP - 8), BX);
write_virtual_word_32(BX_SEG_REG_SS, (Bit32u)(temp_ESP - 10), temp_SP);
write_virtual_word_32(BX_SEG_REG_SS, (Bit32u)(temp_ESP - 12), BP);
write_virtual_word_32(BX_SEG_REG_SS, (Bit32u)(temp_ESP - 14), SI);
write_virtual_word_32(BX_SEG_REG_SS, (Bit32u)(temp_ESP - 16), DI);
ESP -= 16;
}
else
{
write_virtual_word(BX_SEG_REG_SS, (Bit16u)(temp_SP - 2), AX);
write_virtual_word(BX_SEG_REG_SS, (Bit16u)(temp_SP - 4), CX);
write_virtual_word(BX_SEG_REG_SS, (Bit16u)(temp_SP - 6), DX);
write_virtual_word(BX_SEG_REG_SS, (Bit16u)(temp_SP - 8), BX);
write_virtual_word(BX_SEG_REG_SS, (Bit16u)(temp_SP - 10), temp_SP);
write_virtual_word(BX_SEG_REG_SS, (Bit16u)(temp_SP - 12), BP);
write_virtual_word(BX_SEG_REG_SS, (Bit16u)(temp_SP - 14), SI);
write_virtual_word(BX_SEG_REG_SS, (Bit16u)(temp_SP - 16), DI);
write_virtual_word_32(BX_SEG_REG_SS, (Bit16u)(temp_SP - 2), AX);
write_virtual_word_32(BX_SEG_REG_SS, (Bit16u)(temp_SP - 4), CX);
write_virtual_word_32(BX_SEG_REG_SS, (Bit16u)(temp_SP - 6), DX);
write_virtual_word_32(BX_SEG_REG_SS, (Bit16u)(temp_SP - 8), BX);
write_virtual_word_32(BX_SEG_REG_SS, (Bit16u)(temp_SP - 10), temp_SP);
write_virtual_word_32(BX_SEG_REG_SS, (Bit16u)(temp_SP - 12), BP);
write_virtual_word_32(BX_SEG_REG_SS, (Bit16u)(temp_SP - 14), SI);
write_virtual_word_32(BX_SEG_REG_SS, (Bit16u)(temp_SP - 16), DI);
SP -= 16;
}
}
@ -207,27 +207,27 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::POPAD16(bxInstruction_c *i)
if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.d_b)
{
Bit32u temp_ESP = ESP;
di = read_virtual_word(BX_SEG_REG_SS, (Bit32u)(temp_ESP + 0));
si = read_virtual_word(BX_SEG_REG_SS, (Bit32u)(temp_ESP + 2));
bp = read_virtual_word(BX_SEG_REG_SS, (Bit32u)(temp_ESP + 4));
dummy = read_virtual_word(BX_SEG_REG_SS, (Bit32u)(temp_ESP + 6));
bx = read_virtual_word(BX_SEG_REG_SS, (Bit32u)(temp_ESP + 8));
dx = read_virtual_word(BX_SEG_REG_SS, (Bit32u)(temp_ESP + 10));
cx = read_virtual_word(BX_SEG_REG_SS, (Bit32u)(temp_ESP + 12));
ax = read_virtual_word(BX_SEG_REG_SS, (Bit32u)(temp_ESP + 14));
di = read_virtual_word_32(BX_SEG_REG_SS, (Bit32u)(temp_ESP + 0));
si = read_virtual_word_32(BX_SEG_REG_SS, (Bit32u)(temp_ESP + 2));
bp = read_virtual_word_32(BX_SEG_REG_SS, (Bit32u)(temp_ESP + 4));
dummy = read_virtual_word_32(BX_SEG_REG_SS, (Bit32u)(temp_ESP + 6));
bx = read_virtual_word_32(BX_SEG_REG_SS, (Bit32u)(temp_ESP + 8));
dx = read_virtual_word_32(BX_SEG_REG_SS, (Bit32u)(temp_ESP + 10));
cx = read_virtual_word_32(BX_SEG_REG_SS, (Bit32u)(temp_ESP + 12));
ax = read_virtual_word_32(BX_SEG_REG_SS, (Bit32u)(temp_ESP + 14));
ESP += 16;
}
else
{
Bit16u temp_SP = SP;
di = read_virtual_word(BX_SEG_REG_SS, (Bit16u)(temp_SP + 0));
si = read_virtual_word(BX_SEG_REG_SS, (Bit16u)(temp_SP + 2));
bp = read_virtual_word(BX_SEG_REG_SS, (Bit16u)(temp_SP + 4));
dummy = read_virtual_word(BX_SEG_REG_SS, (Bit16u)(temp_SP + 6));
bx = read_virtual_word(BX_SEG_REG_SS, (Bit16u)(temp_SP + 8));
dx = read_virtual_word(BX_SEG_REG_SS, (Bit16u)(temp_SP + 10));
cx = read_virtual_word(BX_SEG_REG_SS, (Bit16u)(temp_SP + 12));
ax = read_virtual_word(BX_SEG_REG_SS, (Bit16u)(temp_SP + 14));
di = read_virtual_word_32(BX_SEG_REG_SS, (Bit16u)(temp_SP + 0));
si = read_virtual_word_32(BX_SEG_REG_SS, (Bit16u)(temp_SP + 2));
bp = read_virtual_word_32(BX_SEG_REG_SS, (Bit16u)(temp_SP + 4));
dummy = read_virtual_word_32(BX_SEG_REG_SS, (Bit16u)(temp_SP + 6));
bx = read_virtual_word_32(BX_SEG_REG_SS, (Bit16u)(temp_SP + 8));
dx = read_virtual_word_32(BX_SEG_REG_SS, (Bit16u)(temp_SP + 10));
cx = read_virtual_word_32(BX_SEG_REG_SS, (Bit16u)(temp_SP + 12));
ax = read_virtual_word_32(BX_SEG_REG_SS, (Bit16u)(temp_SP + 14));
SP += 16;
}
@ -259,7 +259,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::ENTER16_IwIb(bxInstruction_c *i)
/* do level-1 times */
while (--level) {
ebp -= 2;
Bit16u temp16 = read_virtual_word(BX_SEG_REG_SS, ebp);
Bit16u temp16 = read_virtual_word_32(BX_SEG_REG_SS, ebp);
push_16(temp16);
}
@ -283,7 +283,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::ENTER16_IwIb(bxInstruction_c *i)
/* do level-1 times */
while (--level) {
bp -= 2;
Bit16u temp16 = read_virtual_word(BX_SEG_REG_SS, bp);
Bit16u temp16 = read_virtual_word_32(BX_SEG_REG_SS, bp);
push_16(temp16);
}
@ -296,7 +296,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::ENTER16_IwIb(bxInstruction_c *i)
// ENTER finishes with memory write check on the final stack pointer
// the memory is touched but no write actually occurs
// emulate it by doing RMW read access from SS:SP
read_RMW_virtual_word(BX_SEG_REG_SS, SP);
read_RMW_virtual_word_32(BX_SEG_REG_SS, SP);
}
BP = frame_ptr16;
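
About the read_RMW_virtual_* calls here and in the ENTER32 hunk below: a read-modify-write read is translated and checked with write intent, so it raises the same fault a store would while leaving memory untouched — which is all ENTER needs for its final write check. A very small standalone sketch of that checking side effect (nothing here is the Bochs accessor; can_write stands in for the segmentation/paging write-permission test):

#include <cstdint>
#include <stdexcept>

// Read one word at 'offset', but validate the access as if it were a write.
// ENTER discards the value: only the fault-or-not outcome matters.
uint16_t read_rmw_word(const uint8_t *stack, uint32_t offset, bool can_write)
{
  if (!can_write)
    throw std::runtime_error("final stack pointer is not writable");
  return static_cast<uint16_t>(stack[offset]) |
         static_cast<uint16_t>(stack[offset + 1] << 8);
}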


@ -1,5 +1,5 @@
/////////////////////////////////////////////////////////////////////////
// $Id: stack32.cc,v 1.53 2008-05-08 18:02:21 sshwarts Exp $
// $Id: stack32.cc,v 1.54 2008-06-12 19:14:39 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
// Copyright (C) 2001 MandrakeSoft S.A.
@ -176,26 +176,26 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::PUSHAD32(bxInstruction_c *i)
if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.d_b)
{
write_virtual_dword(BX_SEG_REG_SS, (Bit32u) (temp_ESP - 4), EAX);
write_virtual_dword(BX_SEG_REG_SS, (Bit32u) (temp_ESP - 8), ECX);
write_virtual_dword(BX_SEG_REG_SS, (Bit32u) (temp_ESP - 12), EDX);
write_virtual_dword(BX_SEG_REG_SS, (Bit32u) (temp_ESP - 16), EBX);
write_virtual_dword(BX_SEG_REG_SS, (Bit32u) (temp_ESP - 20), temp_ESP);
write_virtual_dword(BX_SEG_REG_SS, (Bit32u) (temp_ESP - 24), EBP);
write_virtual_dword(BX_SEG_REG_SS, (Bit32u) (temp_ESP - 28), ESI);
write_virtual_dword(BX_SEG_REG_SS, (Bit32u) (temp_ESP - 32), EDI);
write_virtual_dword_32(BX_SEG_REG_SS, (Bit32u) (temp_ESP - 4), EAX);
write_virtual_dword_32(BX_SEG_REG_SS, (Bit32u) (temp_ESP - 8), ECX);
write_virtual_dword_32(BX_SEG_REG_SS, (Bit32u) (temp_ESP - 12), EDX);
write_virtual_dword_32(BX_SEG_REG_SS, (Bit32u) (temp_ESP - 16), EBX);
write_virtual_dword_32(BX_SEG_REG_SS, (Bit32u) (temp_ESP - 20), temp_ESP);
write_virtual_dword_32(BX_SEG_REG_SS, (Bit32u) (temp_ESP - 24), EBP);
write_virtual_dword_32(BX_SEG_REG_SS, (Bit32u) (temp_ESP - 28), ESI);
write_virtual_dword_32(BX_SEG_REG_SS, (Bit32u) (temp_ESP - 32), EDI);
ESP -= 32;
}
else
{
write_virtual_dword(BX_SEG_REG_SS, (Bit16u) (temp_SP - 4), EAX);
write_virtual_dword(BX_SEG_REG_SS, (Bit16u) (temp_SP - 8), ECX);
write_virtual_dword(BX_SEG_REG_SS, (Bit16u) (temp_SP - 12), EDX);
write_virtual_dword(BX_SEG_REG_SS, (Bit16u) (temp_SP - 16), EBX);
write_virtual_dword(BX_SEG_REG_SS, (Bit16u) (temp_SP - 20), temp_ESP);
write_virtual_dword(BX_SEG_REG_SS, (Bit16u) (temp_SP - 24), EBP);
write_virtual_dword(BX_SEG_REG_SS, (Bit16u) (temp_SP - 28), ESI);
write_virtual_dword(BX_SEG_REG_SS, (Bit16u) (temp_SP - 32), EDI);
write_virtual_dword_32(BX_SEG_REG_SS, (Bit16u) (temp_SP - 4), EAX);
write_virtual_dword_32(BX_SEG_REG_SS, (Bit16u) (temp_SP - 8), ECX);
write_virtual_dword_32(BX_SEG_REG_SS, (Bit16u) (temp_SP - 12), EDX);
write_virtual_dword_32(BX_SEG_REG_SS, (Bit16u) (temp_SP - 16), EBX);
write_virtual_dword_32(BX_SEG_REG_SS, (Bit16u) (temp_SP - 20), temp_ESP);
write_virtual_dword_32(BX_SEG_REG_SS, (Bit16u) (temp_SP - 24), EBP);
write_virtual_dword_32(BX_SEG_REG_SS, (Bit16u) (temp_SP - 28), ESI);
write_virtual_dword_32(BX_SEG_REG_SS, (Bit16u) (temp_SP - 32), EDI);
SP -= 32;
}
}
@ -207,27 +207,27 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::POPAD32(bxInstruction_c *i)
if (BX_CPU_THIS_PTR sregs[BX_SEG_REG_SS].cache.u.segment.d_b)
{
Bit32u temp_ESP = ESP;
edi = read_virtual_dword(BX_SEG_REG_SS, (Bit32u) (temp_ESP + 0));
esi = read_virtual_dword(BX_SEG_REG_SS, (Bit32u) (temp_ESP + 4));
ebp = read_virtual_dword(BX_SEG_REG_SS, (Bit32u) (temp_ESP + 8));
dummy = read_virtual_dword(BX_SEG_REG_SS, (Bit32u) (temp_ESP + 12));
ebx = read_virtual_dword(BX_SEG_REG_SS, (Bit32u) (temp_ESP + 16));
edx = read_virtual_dword(BX_SEG_REG_SS, (Bit32u) (temp_ESP + 20));
ecx = read_virtual_dword(BX_SEG_REG_SS, (Bit32u) (temp_ESP + 24));
eax = read_virtual_dword(BX_SEG_REG_SS, (Bit32u) (temp_ESP + 28));
edi = read_virtual_dword_32(BX_SEG_REG_SS, (Bit32u) (temp_ESP + 0));
esi = read_virtual_dword_32(BX_SEG_REG_SS, (Bit32u) (temp_ESP + 4));
ebp = read_virtual_dword_32(BX_SEG_REG_SS, (Bit32u) (temp_ESP + 8));
dummy = read_virtual_dword_32(BX_SEG_REG_SS, (Bit32u) (temp_ESP + 12));
ebx = read_virtual_dword_32(BX_SEG_REG_SS, (Bit32u) (temp_ESP + 16));
edx = read_virtual_dword_32(BX_SEG_REG_SS, (Bit32u) (temp_ESP + 20));
ecx = read_virtual_dword_32(BX_SEG_REG_SS, (Bit32u) (temp_ESP + 24));
eax = read_virtual_dword_32(BX_SEG_REG_SS, (Bit32u) (temp_ESP + 28));
ESP += 32;
}
else
{
Bit16u temp_SP = SP;
edi = read_virtual_dword(BX_SEG_REG_SS, (Bit16u) (temp_SP + 0));
esi = read_virtual_dword(BX_SEG_REG_SS, (Bit16u) (temp_SP + 4));
ebp = read_virtual_dword(BX_SEG_REG_SS, (Bit16u) (temp_SP + 8));
dummy = read_virtual_dword(BX_SEG_REG_SS, (Bit16u) (temp_SP + 12));
ebx = read_virtual_dword(BX_SEG_REG_SS, (Bit16u) (temp_SP + 16));
edx = read_virtual_dword(BX_SEG_REG_SS, (Bit16u) (temp_SP + 20));
ecx = read_virtual_dword(BX_SEG_REG_SS, (Bit16u) (temp_SP + 24));
eax = read_virtual_dword(BX_SEG_REG_SS, (Bit16u) (temp_SP + 28));
edi = read_virtual_dword_32(BX_SEG_REG_SS, (Bit16u) (temp_SP + 0));
esi = read_virtual_dword_32(BX_SEG_REG_SS, (Bit16u) (temp_SP + 4));
ebp = read_virtual_dword_32(BX_SEG_REG_SS, (Bit16u) (temp_SP + 8));
dummy = read_virtual_dword_32(BX_SEG_REG_SS, (Bit16u) (temp_SP + 12));
ebx = read_virtual_dword_32(BX_SEG_REG_SS, (Bit16u) (temp_SP + 16));
edx = read_virtual_dword_32(BX_SEG_REG_SS, (Bit16u) (temp_SP + 20));
ecx = read_virtual_dword_32(BX_SEG_REG_SS, (Bit16u) (temp_SP + 24));
eax = read_virtual_dword_32(BX_SEG_REG_SS, (Bit16u) (temp_SP + 28));
SP += 32;
}
@ -259,7 +259,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::ENTER32_IwIb(bxInstruction_c *i)
/* do level-1 times */
while (--level) {
ebp -= 4;
Bit32u temp32 = read_virtual_dword(BX_SEG_REG_SS, ebp);
Bit32u temp32 = read_virtual_dword_32(BX_SEG_REG_SS, ebp);
push_32(temp32);
}
@ -281,7 +281,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::ENTER32_IwIb(bxInstruction_c *i)
/* do level-1 times */
while (--level) {
bp -= 4;
Bit32u temp32 = read_virtual_dword(BX_SEG_REG_SS, bp);
Bit32u temp32 = read_virtual_dword_32(BX_SEG_REG_SS, bp);
push_32(temp32);
}
@ -294,7 +294,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::ENTER32_IwIb(bxInstruction_c *i)
// ENTER finishes with memory write check on the final stack pointer
// the memory is touched but no write actually occurs
// emulate it by doing RMW read access from SS:SP
read_RMW_virtual_dword(BX_SEG_REG_SS, SP);
read_RMW_virtual_dword_32(BX_SEG_REG_SS, SP);
}
EBP = frame_ptr32;


@ -1,5 +1,5 @@
/////////////////////////////////////////////////////////////////////////
// $Id: string.cc,v 1.60 2008-05-10 18:10:53 sshwarts Exp $
// $Id: string.cc,v 1.61 2008-06-12 19:14:39 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
// Copyright (C) 2001 MandrakeSoft S.A.
@ -634,14 +634,14 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::MOVSB16_XbYb(bxInstruction_c *i)
}
else {
temp8 = read_virtual_byte(i->seg(), SI);
write_virtual_byte(BX_SEG_REG_ES, DI, temp8);
write_virtual_byte_32(BX_SEG_REG_ES, DI, temp8);
}
}
else
#endif
{
temp8 = read_virtual_byte(i->seg(), SI);
write_virtual_byte(BX_SEG_REG_ES, DI, temp8);
temp8 = read_virtual_byte_32(i->seg(), SI);
write_virtual_byte_32(BX_SEG_REG_ES, DI, temp8);
}
if (BX_CPU_THIS_PTR get_DF()) {
@ -763,15 +763,15 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::MOVSW16_XwYw(bxInstruction_c *i)
incr = wordCount << 1; // count * 2
}
else {
temp16 = read_virtual_word(i->seg(), si);
write_virtual_word(BX_SEG_REG_ES, di, temp16);
temp16 = read_virtual_word_32(i->seg(), si);
write_virtual_word_32(BX_SEG_REG_ES, di, temp16);
}
}
else
#endif
{
temp16 = read_virtual_word(i->seg(), si);
write_virtual_word(BX_SEG_REG_ES, di, temp16);
temp16 = read_virtual_word_32(i->seg(), si);
write_virtual_word_32(BX_SEG_REG_ES, di, temp16);
}
if (BX_CPU_THIS_PTR get_DF()) {
@ -848,8 +848,8 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::MOVSD16_XdYd(bxInstruction_c *i)
Bit16u si = SI;
Bit16u di = DI;
temp32 = read_virtual_dword(i->seg(), si);
write_virtual_dword(BX_SEG_REG_ES, di, temp32);
temp32 = read_virtual_dword_32(i->seg(), si);
write_virtual_dword_32(BX_SEG_REG_ES, di, temp32);
if (BX_CPU_THIS_PTR get_DF()) {
si -= 4;
@ -1078,8 +1078,8 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::CMPSB16_XbYb(bxInstruction_c *i)
Bit16u si = SI;
Bit16u di = DI;
op1_8 = read_virtual_byte(i->seg(), si);
op2_8 = read_virtual_byte(BX_SEG_REG_ES, di);
op1_8 = read_virtual_byte_32(i->seg(), si);
op2_8 = read_virtual_byte_32(BX_SEG_REG_ES, di);
diff_8 = op1_8 - op2_8;
@ -1165,8 +1165,8 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::CMPSW16_XwYw(bxInstruction_c *i)
Bit16u si = SI;
Bit16u di = DI;
op1_16 = read_virtual_word(i->seg(), si);
op2_16 = read_virtual_word(BX_SEG_REG_ES, di);
op1_16 = read_virtual_word_32(i->seg(), si);
op2_16 = read_virtual_word_32(BX_SEG_REG_ES, di);
diff_16 = op1_16 - op2_16;
@ -1252,8 +1252,8 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::CMPSD16_XdYd(bxInstruction_c *i)
Bit16u si = SI;
Bit16u di = DI;
op1_32 = read_virtual_dword(i->seg(), si);
op2_32 = read_virtual_dword(BX_SEG_REG_ES, di);
op1_32 = read_virtual_dword_32(i->seg(), si);
op2_32 = read_virtual_dword_32(BX_SEG_REG_ES, di);
diff_32 = op1_32 - op2_32;
@ -1466,7 +1466,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::SCASB16_ALXb(bxInstruction_c *i)
Bit16u di = DI;
op2_8 = read_virtual_byte(BX_SEG_REG_ES, di);
op2_8 = read_virtual_byte_32(BX_SEG_REG_ES, di);
diff_8 = op1_8 - op2_8;
@ -1537,7 +1537,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::SCASW16_AXXw(bxInstruction_c *i)
Bit16u di = DI;
op2_16 = read_virtual_word(BX_SEG_REG_ES, di);
op2_16 = read_virtual_word_32(BX_SEG_REG_ES, di);
diff_16 = op1_16 - op2_16;
SET_FLAGS_OSZAPC_SUB_16(op1_16, op2_16, diff_16);
@ -1607,7 +1607,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::SCASD16_EAXXd(bxInstruction_c *i)
Bit16u di = DI;
op2_32 = read_virtual_dword(BX_SEG_REG_ES, di);
op2_32 = read_virtual_dword_32(BX_SEG_REG_ES, di);
diff_32 = op1_32 - op2_32;
SET_FLAGS_OSZAPC_SUB_32(op1_32, op2_32, diff_32);
@ -1793,7 +1793,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::STOSB16_YbAL(bxInstruction_c *i)
{
Bit16u di = DI;
write_virtual_byte(BX_SEG_REG_ES, di, AL);
write_virtual_byte_32(BX_SEG_REG_ES, di, AL);
if (BX_CPU_THIS_PTR get_DF()) {
di--;
@ -1876,7 +1876,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::STOSW16_YwAX(bxInstruction_c *i)
{
Bit16u di = DI;
write_virtual_word(BX_SEG_REG_ES, di, AX);
write_virtual_word_32(BX_SEG_REG_ES, di, AX);
if (BX_CPU_THIS_PTR get_DF()) {
di -= 2;
@ -1930,7 +1930,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::STOSD16_YdEAX(bxInstruction_c *i)
{
Bit16u di = DI;
write_virtual_dword(BX_SEG_REG_ES, di, EAX);
write_virtual_dword_32(BX_SEG_REG_ES, di, EAX);
if (BX_CPU_THIS_PTR get_DF()) {
di -= 4;
@ -2090,7 +2090,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::LODSB16_ALXb(bxInstruction_c *i)
{
Bit16u si = SI;
AL = read_virtual_byte(i->seg(), si);
AL = read_virtual_byte_32(i->seg(), si);
if (BX_CPU_THIS_PTR get_DF()) {
si--;
@ -2144,7 +2144,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::LODSW16_AXXw(bxInstruction_c *i)
{
Bit16u si = SI;
AX = read_virtual_word(i->seg(), si);
AX = read_virtual_word_32(i->seg(), si);
if (BX_CPU_THIS_PTR get_DF()) {
si -= 2;
@ -2198,7 +2198,7 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::LODSD16_EAXXd(bxInstruction_c *i)
{
Bit16u si = SI;
RAX = read_virtual_dword(i->seg(), si);
RAX = read_virtual_dword_32(i->seg(), si);
if (BX_CPU_THIS_PTR get_DF()) {
si -= 4;


@ -1,5 +1,5 @@
/////////////////////////////////////////////////////////////////////////
// $Id: vm8086.cc,v 1.45 2008-05-26 21:46:39 sshwarts Exp $
// $Id: vm8086.cc,v 1.46 2008-06-12 19:14:40 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
// Copyright (C) 2001 MandrakeSoft S.A.
@ -73,14 +73,14 @@ void BX_CPU_C::stack_return_to_v86(Bit32u new_eip, Bit32u raw_cs_selector, Bit32
temp_ESP = SP;
// load SS:ESP from stack
new_esp = read_virtual_dword(BX_SEG_REG_SS, temp_ESP+12);
raw_ss_selector = (Bit16u) read_virtual_dword(BX_SEG_REG_SS, temp_ESP+16);
new_esp = read_virtual_dword_32(BX_SEG_REG_SS, temp_ESP+12);
raw_ss_selector = (Bit16u) read_virtual_dword_32(BX_SEG_REG_SS, temp_ESP+16);
// load ES,DS,FS,GS from stack
raw_es_selector = (Bit16u) read_virtual_dword(BX_SEG_REG_SS, temp_ESP+20);
raw_ds_selector = (Bit16u) read_virtual_dword(BX_SEG_REG_SS, temp_ESP+24);
raw_fs_selector = (Bit16u) read_virtual_dword(BX_SEG_REG_SS, temp_ESP+28);
raw_gs_selector = (Bit16u) read_virtual_dword(BX_SEG_REG_SS, temp_ESP+32);
raw_es_selector = (Bit16u) read_virtual_dword_32(BX_SEG_REG_SS, temp_ESP+20);
raw_ds_selector = (Bit16u) read_virtual_dword_32(BX_SEG_REG_SS, temp_ESP+24);
raw_fs_selector = (Bit16u) read_virtual_dword_32(BX_SEG_REG_SS, temp_ESP+28);
raw_gs_selector = (Bit16u) read_virtual_dword_32(BX_SEG_REG_SS, temp_ESP+32);
writeEFlags(flags32, EFlagsValidMask);
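
These reads pick up the rest of the extended frame that leads back to virtual-8086 mode: beyond EIP/CS/EFLAGS (offsets 0/4/8, already consumed in iret_protected and summarized after the iret.cc hunk above) it carries ESP, SS and then ES, DS, FS, GS, one dword each. As constants (illustrative names):

#include <cstdint>

// Dword offsets, from the stack pointer at the start of IRET, of the extended
// frame consumed when returning to virtual-8086 mode.
enum V86IretFrameOffset : uint32_t {
  kV86_EIP    = 0,
  kV86_CS     = 4,
  kV86_EFLAGS = 8,    // VM = 1 here is what selected this path
  kV86_ESP    = 12,
  kV86_SS     = 16,
  kV86_ES     = 20,
  kV86_DS     = 24,
  kV86_FS     = 28,
  kV86_GS     = 32,
};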


@ -1,5 +1,5 @@
/////////////////////////////////////////////////////////////////////////
// $Id: xsave.cc,v 1.10 2008-05-10 13:34:47 sshwarts Exp $
// $Id: xsave.cc,v 1.11 2008-06-12 19:14:40 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2008 Stanislav Shwartsman
@ -237,20 +237,23 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::XRSTOR(bxInstruction_c *i)
/* load i387 register file */
for(index=0; index < 8; index++)
{
read_virtual_tword(i->seg(), RMAddr(i)+index*16+32, &(BX_FPU_REG(index)));
floatx80 reg;
reg.fraction = read_virtual_qword(i->seg(), RMAddr(i)+index*16+32);
reg.exp = read_virtual_word (i->seg(), RMAddr(i)+index*16+40);
BX_FPU_REG(index) = reg;
}
/* Restore floating point tag word - see description for FXRSTOR instruction */
BX_CPU_THIS_PTR the_i387.twd = unpack_FPU_TW(tag_byte);
}
else {
// initialize FPU with reset values
BX_CPU_THIS_PTR the_i387.init();
// initialize FPU with reset values
BX_CPU_THIS_PTR the_i387.init();
for (index=0;index<8;index++) {
BX_CPU_THIS_PTR the_i387.st_space[index].exp = 0;
BX_CPU_THIS_PTR the_i387.st_space[index].fraction = 0;
}
for (index=0;index<8;index++) {
floatx80 reg = { 0, 0 };
BX_FPU_REG(index) = reg;
}
}
}
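
On the tag-word comment above: the FXSAVE/XSAVE image stores an abridged tag with one bit per register (1 = not empty), so restoring the full 2-bit-per-register tag word means re-classifying each restored value. A standalone sketch of that reconstruction — not the Bochs unpack_FPU_TW; the classification is the usual x87 one (zero, special for NaN/infinity/denormal/unnormal, valid otherwise):

#include <cstdint>

// x87 tag values: 00 valid, 01 zero, 10 special, 11 empty.
enum FpuTag : uint16_t { kValid = 0, kZero = 1, kSpecial = 2, kEmpty = 3 };

// Classify one restored 80-bit value (sign/exponent word + significand).
FpuTag classify(uint16_t exp, uint64_t fraction)
{
  uint16_t e = exp & 0x7fff;
  if (e == 0 && fraction == 0) return kZero;
  if (e == 0x7fff || e == 0 || !(fraction >> 63)) return kSpecial;  // NaN/Inf, denormal, unnormal
  return kValid;
}

// Rebuild the 16-bit tag word from the abridged one-bit-per-register tag.
uint16_t rebuild_tag_word(uint8_t abridged, const uint16_t exp[8], const uint64_t frac[8])
{
  uint16_t tw = 0;
  for (int i = 0; i < 8; i++) {
    FpuTag t = (abridged & (1u << i)) ? classify(exp[i], frac[i]) : kEmpty;
    tw |= static_cast<uint16_t>(t) << (2 * i);
  }
  return tw;
}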


@ -1,5 +1,5 @@
/////////////////////////////////////////////////////////////////////////
// $Id: fpu.cc,v 1.42 2008-05-19 20:00:42 sshwarts Exp $
// $Id: fpu.cc,v 1.43 2008-06-12 19:14:40 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2003 Stanislav Shwartsman
@ -406,7 +406,9 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::FRSTOR(bxInstruction_c *i)
/* read all registers in stack order */
for(int n=0;n<8;n++)
{
read_virtual_tword(i->seg(), RMAddr(i) + offset + n*10, &tmp);
tmp.fraction = read_virtual_qword(i->seg(), RMAddr(i) + offset + n*10);
tmp.exp = read_virtual_word (i->seg(), RMAddr(i) + offset + n*10 + 8);
// update tag only if it is not empty
BX_WRITE_FPU_REGISTER_AND_TAG(tmp,
IS_TAG_EMPTY(n) ? FPU_Tag_Empty : FPU_tagof(tmp), n);
@ -428,7 +430,8 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::FNSAVE(bxInstruction_c *i)
for(int n=0;n<8;n++)
{
floatx80 stn = BX_READ_FPU_REG(n);
write_virtual_tword(i->seg(), RMAddr(i) + offset + n*10, &stn);
write_virtual_qword(i->seg(), RMAddr(i) + offset + n*10, stn.fraction);
write_virtual_word (i->seg(), RMAddr(i) + offset + n*10 + 8, stn.exp);
}
BX_CPU_THIS_PTR the_i387.init();


@ -1,5 +1,5 @@
/////////////////////////////////////////////////////////////////////////
// $Id: fpu_load_store.cc,v 1.24 2008-05-10 13:34:01 sshwarts Exp $
// $Id: fpu_load_store.cc,v 1.25 2008-06-12 19:14:40 sshwarts Exp $
/////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2003 Stanislav Shwartsman
@ -126,7 +126,8 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::FLD_EXTENDED_REAL(bxInstruction_c *i)
BX_CPU_THIS_PTR prepareFPU(i);
floatx80 result;
read_virtual_tword(i->seg(), RMAddr(i), &result);
result.fraction = read_virtual_qword(i->seg(), RMAddr(i));
result.exp = read_virtual_word (i->seg(), RMAddr(i)+8);
clear_C1();
@ -380,7 +381,8 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::FSTP_EXTENDED_REAL(bxInstruction_c *i)
save_reg = BX_READ_FPU_REG(0);
}
write_virtual_tword(i->seg(), RMAddr(i), &save_reg);
write_virtual_qword(i->seg(), RMAddr(i), save_reg.fraction);
write_virtual_word (i->seg(), RMAddr(i) + 8, save_reg.exp);
BX_CPU_THIS_PTR the_i387.FPU_pop();
#else