Keep the definitions of the YMM/ZMM registers even if AVX or EVEX support is not compiled in, and allow reading/writing them to/from MEM

This commit is contained in:
Stanislav Shwartsman 2018-04-04 19:31:56 +00:00
parent 8c9f7f54b6
commit fd15b61d94
8 changed files with 12 additions and 100 deletions

View File

@ -7,8 +7,8 @@ Changes after 2.6.9 release:
- Bugfixes for CPU emulation correctness (critical bugfixes for PCID, ADCX/ADOX, MOVBE, AVX/AVX-512 and VMX emulation)
! x87: implemented FOPCODE and FDP deprecation features
! AVX-512: implemented AVX-512 VBMI2/VNNI/BITALG instructions
! Implemented VAES instructions / VPCLMULQDQ instruction
! Implemented GFNI instructions
! Crypto: Implemented VAES / VPCLMULQDQ / GFNI instructions
! VMX: Implement EPT-Based Sub-Page Protection
! CPUID Added Skylake-X CPU definition with AVX-512 support
- Bochs Debugger and Instrumentation

View File

@ -2,7 +2,7 @@
// $Id$
/////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2015 Stanislav Shwartsman
// Copyright (c) 2015-2018 Stanislav Shwartsman
// Written by Stanislav Shwartsman [sshwarts at sourceforge net]
//
// This library is free software; you can redistribute it and/or
@ -68,8 +68,6 @@ BX_CPU_C::write_virtual_xmmword_aligned_32(unsigned s, Bit32u offset, const BxPa
write_linear_xmmword_aligned(s, laddr, data);
}
#if BX_SUPPORT_AVX
BX_CPP_INLINE void BX_CPP_AttrRegparmN(3)
BX_CPU_C::write_virtual_ymmword_32(unsigned s, Bit32u offset, const BxPackedYmmRegister *data)
{
@ -84,10 +82,6 @@ BX_CPU_C::write_virtual_ymmword_aligned_32(unsigned s, Bit32u offset, const BxPa
write_linear_ymmword_aligned(s, laddr, data);
}
#endif // BX_SUPPORT_AVX
#if BX_SUPPORT_EVEX
BX_CPP_INLINE void BX_CPP_AttrRegparmN(3)
BX_CPU_C::write_virtual_zmmword_32(unsigned s, Bit32u offset, const BxPackedZmmRegister *data)
{
@ -102,8 +96,6 @@ BX_CPU_C::write_virtual_zmmword_aligned_32(unsigned s, Bit32u offset, const BxPa
write_linear_zmmword_aligned(s, laddr, data);
}
#endif // BX_SUPPORT_EVEX
#endif // BX_CPU_LEVEL >= 6
BX_CPP_INLINE void BX_CPP_AttrRegparmN(2)
@ -157,8 +149,6 @@ BX_CPU_C::read_virtual_xmmword_aligned_32(unsigned s, Bit32u offset, BxPackedXmm
read_linear_xmmword_aligned(s, laddr, data);
}
#if BX_SUPPORT_AVX
BX_CPP_INLINE void BX_CPP_AttrRegparmN(3)
BX_CPU_C::read_virtual_ymmword_32(unsigned s, Bit32u offset, BxPackedYmmRegister *data)
{
@ -173,10 +163,6 @@ BX_CPU_C::read_virtual_ymmword_aligned_32(unsigned s, Bit32u offset, BxPackedYmm
read_linear_ymmword_aligned(s, laddr, data);
}
#endif // BX_SUPPORT_AVX
#if BX_SUPPORT_EVEX
BX_CPP_INLINE void BX_CPP_AttrRegparmN(3)
BX_CPU_C::read_virtual_zmmword_32(unsigned s, Bit32u offset, BxPackedZmmRegister *data)
{
@ -191,8 +177,6 @@ BX_CPU_C::read_virtual_zmmword_aligned_32(unsigned s, Bit32u offset, BxPackedZmm
read_linear_zmmword_aligned(s, laddr, data);
}
#endif // BX_SUPPORT_EVEX
#endif // BX_CPU_LEVEL >= 6
BX_CPP_INLINE void BX_CPP_AttrRegparmN(3)
@ -239,8 +223,6 @@ BX_CPU_C::write_virtual_xmmword_aligned(unsigned s, bx_address offset, const BxP
write_linear_xmmword_aligned(s, laddr, data);
}
#if BX_SUPPORT_AVX
BX_CPP_INLINE void BX_CPP_AttrRegparmN(3)
BX_CPU_C::write_virtual_ymmword(unsigned s, bx_address offset, const BxPackedYmmRegister *data)
{
@ -255,10 +237,6 @@ BX_CPU_C::write_virtual_ymmword_aligned(unsigned s, bx_address offset, const BxP
write_linear_ymmword_aligned(s, laddr, data);
}
#endif // BX_SUPPORT_AVX
#if BX_SUPPORT_EVEX
BX_CPP_INLINE void BX_CPP_AttrRegparmN(3)
BX_CPU_C::write_virtual_zmmword(unsigned s, bx_address offset, const BxPackedZmmRegister *data)
{
@ -273,8 +251,6 @@ BX_CPU_C::write_virtual_zmmword_aligned(unsigned s, bx_address offset, const BxP
write_linear_zmmword_aligned(s, laddr, data);
}
#endif // BX_SUPPORT_EVEX
#endif // BX_CPU_LEVEL >= 6
BX_CPP_INLINE void BX_CPP_AttrRegparmN(2)
@ -328,8 +304,6 @@ BX_CPU_C::read_virtual_xmmword_aligned(unsigned s, bx_address offset, BxPackedXm
read_linear_xmmword_aligned(s, laddr, data);
}
#if BX_SUPPORT_AVX
BX_CPP_INLINE void BX_CPP_AttrRegparmN(3)
BX_CPU_C::read_virtual_ymmword(unsigned s, bx_address offset, BxPackedYmmRegister *data)
{
@ -344,10 +318,6 @@ BX_CPU_C::read_virtual_ymmword_aligned(unsigned s, bx_address offset, BxPackedYm
read_linear_ymmword_aligned(s, laddr, data);
}
#endif // BX_SUPPORT_AVX
#if BX_SUPPORT_EVEX
BX_CPP_INLINE void BX_CPP_AttrRegparmN(3)
BX_CPU_C::read_virtual_zmmword(unsigned s, bx_address offset, BxPackedZmmRegister *data)
{
@ -362,8 +332,6 @@ BX_CPU_C::read_virtual_zmmword_aligned(unsigned s, bx_address offset, BxPackedZm
read_linear_zmmword_aligned(s, laddr, data);
}
#endif // BX_SUPPORT_EVEX
#endif // BX_CPU_LEVEL >= 6
//////////////////////////////////////////////////////////////

View File

@ -2,7 +2,7 @@
// $Id$
/////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2008-2015 Stanislav Shwartsman
// Copyright (c) 2008-2018 Stanislav Shwartsman
// Written by Stanislav Shwartsman [sshwarts at sourceforge net]
//
// This library is free software; you can redistribute it and/or
@ -191,7 +191,6 @@ BX_CPU_C::write_linear_xmmword_aligned(unsigned s, bx_address laddr, const BxPac
exception(int_number(s), 0);
}
#if BX_SUPPORT_AVX
void BX_CPP_AttrRegparmN(3)
BX_CPU_C::write_linear_ymmword(unsigned s, bx_address laddr, const BxPackedYmmRegister *data)
{
@ -248,9 +247,7 @@ BX_CPU_C::write_linear_ymmword_aligned(unsigned s, bx_address laddr, const BxPac
if (access_write_linear(laddr, 32, CPL, 0x0, (void *) data) < 0)
exception(int_number(s), 0);
}
#endif
#if BX_SUPPORT_EVEX
void BX_CPP_AttrRegparmN(3)
BX_CPU_C::write_linear_zmmword(unsigned s, bx_address laddr, const BxPackedZmmRegister *data)
{
@ -307,7 +304,6 @@ BX_CPU_C::write_linear_zmmword_aligned(unsigned s, bx_address laddr, const BxPac
if (access_write_linear(laddr, 64, CPL, 0x0, (void *) data) < 0)
exception(int_number(s), 0);
}
#endif
#endif
@ -510,7 +506,6 @@ BX_CPU_C::read_linear_xmmword_aligned(unsigned s, bx_address laddr, BxPackedXmmR
exception(int_number(s), 0);
}
#if BX_SUPPORT_AVX
void BX_CPP_AttrRegparmN(3)
BX_CPU_C::read_linear_ymmword(unsigned s, bx_address laddr, BxPackedYmmRegister *data)
{
@ -563,9 +558,7 @@ BX_CPU_C::read_linear_ymmword_aligned(unsigned s, bx_address laddr, BxPackedYmmR
if (access_read_linear(laddr, 32, CPL, BX_READ, 0x0, (void *) data) < 0)
exception(int_number(s), 0);
}
#endif
#if BX_SUPPORT_EVEX
void BX_CPP_AttrRegparmN(3)
BX_CPU_C::read_linear_zmmword(unsigned s, bx_address laddr, BxPackedZmmRegister *data)
{
@ -618,7 +611,6 @@ BX_CPU_C::read_linear_zmmword_aligned(unsigned s, bx_address laddr, BxPackedZmmR
if (access_read_linear(laddr, 64, CPL, BX_READ, 0x0, (void *) data) < 0)
exception(int_number(s), 0);
}
#endif
#endif

View File

@ -2,7 +2,7 @@
// $Id$
/////////////////////////////////////////////////////////////////////////
//
// Copyright (C) 2001-2017 The Bochs Project
// Copyright (C) 2001-2018 The Bochs Project
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
@ -4391,14 +4391,10 @@ public: // for now...
#if BX_CPU_LEVEL >= 6
BX_SMF void read_linear_xmmword(unsigned seg, bx_address off, BxPackedXmmRegister *data) BX_CPP_AttrRegparmN(3);
BX_SMF void read_linear_xmmword_aligned(unsigned seg, bx_address off, BxPackedXmmRegister *data) BX_CPP_AttrRegparmN(3);
#if BX_SUPPORT_AVX
BX_SMF void read_linear_ymmword(unsigned seg, bx_address off, BxPackedYmmRegister *data) BX_CPP_AttrRegparmN(3);
BX_SMF void read_linear_ymmword_aligned(unsigned seg, bx_address off, BxPackedYmmRegister *data) BX_CPP_AttrRegparmN(3);
#endif
#if BX_SUPPORT_EVEX
BX_SMF void read_linear_zmmword(unsigned seg, bx_address off, BxPackedZmmRegister *data) BX_CPP_AttrRegparmN(3);
BX_SMF void read_linear_zmmword_aligned(unsigned seg, bx_address off, BxPackedZmmRegister *data) BX_CPP_AttrRegparmN(3);
#endif
#endif
BX_SMF void write_linear_byte(unsigned seg, bx_address offset, Bit8u data) BX_CPP_AttrRegparmN(3);
@ -4408,14 +4404,10 @@ public: // for now...
#if BX_CPU_LEVEL >= 6
BX_SMF void write_linear_xmmword(unsigned seg, bx_address offset, const BxPackedXmmRegister *data) BX_CPP_AttrRegparmN(3);
BX_SMF void write_linear_xmmword_aligned(unsigned seg, bx_address offset, const BxPackedXmmRegister *data) BX_CPP_AttrRegparmN(3);
#if BX_SUPPORT_AVX
BX_SMF void write_linear_ymmword(unsigned seg, bx_address off, const BxPackedYmmRegister *data) BX_CPP_AttrRegparmN(3);
BX_SMF void write_linear_ymmword_aligned(unsigned seg, bx_address off, const BxPackedYmmRegister *data) BX_CPP_AttrRegparmN(3);
#endif
#if BX_SUPPORT_EVEX
BX_SMF void write_linear_zmmword(unsigned seg, bx_address off, const BxPackedZmmRegister *data) BX_CPP_AttrRegparmN(3);
BX_SMF void write_linear_zmmword_aligned(unsigned seg, bx_address off, const BxPackedZmmRegister *data) BX_CPP_AttrRegparmN(3);
#endif
#endif
BX_SMF void tickle_read_linear(unsigned seg, bx_address offset) BX_CPP_AttrRegparmN(2);
@ -4429,14 +4421,10 @@ public: // for now...
#if BX_CPU_LEVEL >= 6
BX_SMF void read_virtual_xmmword_32(unsigned seg, Bit32u off, BxPackedXmmRegister *data) BX_CPP_AttrRegparmN(3);
BX_SMF void read_virtual_xmmword_aligned_32(unsigned seg, Bit32u off, BxPackedXmmRegister *data) BX_CPP_AttrRegparmN(3);
#if BX_SUPPORT_AVX
BX_SMF void read_virtual_ymmword_32(unsigned seg, Bit32u off, BxPackedYmmRegister *data) BX_CPP_AttrRegparmN(3);
BX_SMF void read_virtual_ymmword_aligned_32(unsigned seg, Bit32u off, BxPackedYmmRegister *data) BX_CPP_AttrRegparmN(3);
#endif
#if BX_SUPPORT_EVEX
BX_SMF void read_virtual_zmmword_32(unsigned seg, Bit32u off, BxPackedZmmRegister *data) BX_CPP_AttrRegparmN(3);
BX_SMF void read_virtual_zmmword_aligned_32(unsigned seg, Bit32u off, BxPackedZmmRegister *data) BX_CPP_AttrRegparmN(3);
#endif
#endif
BX_SMF void write_virtual_byte_32(unsigned seg, Bit32u offset, Bit8u data) BX_CPP_AttrRegparmN(3);
@ -4446,14 +4434,10 @@ public: // for now...
#if BX_CPU_LEVEL >= 6
BX_SMF void write_virtual_xmmword_32(unsigned seg, Bit32u offset, const BxPackedXmmRegister *data) BX_CPP_AttrRegparmN(3);
BX_SMF void write_virtual_xmmword_aligned_32(unsigned seg, Bit32u offset, const BxPackedXmmRegister *data) BX_CPP_AttrRegparmN(3);
#if BX_SUPPORT_AVX
BX_SMF void write_virtual_ymmword_32(unsigned seg, Bit32u off, const BxPackedYmmRegister *data) BX_CPP_AttrRegparmN(3);
BX_SMF void write_virtual_ymmword_aligned_32(unsigned seg, Bit32u off, const BxPackedYmmRegister *data) BX_CPP_AttrRegparmN(3);
#endif
#if BX_SUPPORT_EVEX
BX_SMF void write_virtual_zmmword_32(unsigned seg, Bit32u off, const BxPackedZmmRegister *data) BX_CPP_AttrRegparmN(3);
BX_SMF void write_virtual_zmmword_aligned_32(unsigned seg, Bit32u off, const BxPackedZmmRegister *data) BX_CPP_AttrRegparmN(3);
#endif
#endif
BX_SMF Bit8u read_virtual_byte(unsigned seg, bx_address offset) BX_CPP_AttrRegparmN(2);
@ -4463,14 +4447,10 @@ public: // for now...
#if BX_CPU_LEVEL >= 6
BX_SMF void read_virtual_xmmword(unsigned seg, bx_address off, BxPackedXmmRegister *data) BX_CPP_AttrRegparmN(3);
BX_SMF void read_virtual_xmmword_aligned(unsigned seg, bx_address off, BxPackedXmmRegister *data) BX_CPP_AttrRegparmN(3);
#if BX_SUPPORT_AVX
BX_SMF void read_virtual_ymmword(unsigned seg, bx_address off, BxPackedYmmRegister *data) BX_CPP_AttrRegparmN(3);
BX_SMF void read_virtual_ymmword_aligned(unsigned seg, bx_address off, BxPackedYmmRegister *data) BX_CPP_AttrRegparmN(3);
#endif
#if BX_SUPPORT_EVEX
BX_SMF void read_virtual_zmmword(unsigned seg, bx_address off, BxPackedZmmRegister *data) BX_CPP_AttrRegparmN(3);
BX_SMF void read_virtual_zmmword_aligned(unsigned seg, bx_address off, BxPackedZmmRegister *data) BX_CPP_AttrRegparmN(3);
#endif
#endif
BX_SMF void write_virtual_byte(unsigned seg, bx_address offset, Bit8u data) BX_CPP_AttrRegparmN(3);
@ -4480,14 +4460,10 @@ public: // for now...
#if BX_CPU_LEVEL >= 6
BX_SMF void write_virtual_xmmword(unsigned seg, bx_address offset, const BxPackedXmmRegister *data) BX_CPP_AttrRegparmN(3);
BX_SMF void write_virtual_xmmword_aligned(unsigned seg, bx_address offset, const BxPackedXmmRegister *data) BX_CPP_AttrRegparmN(3);
#if BX_SUPPORT_AVX
BX_SMF void write_virtual_ymmword(unsigned seg, bx_address off, const BxPackedYmmRegister *data) BX_CPP_AttrRegparmN(3);
BX_SMF void write_virtual_ymmword_aligned(unsigned seg, bx_address off, const BxPackedYmmRegister *data) BX_CPP_AttrRegparmN(3);
#endif
#if BX_SUPPORT_EVEX
BX_SMF void write_virtual_zmmword(unsigned seg, bx_address off, const BxPackedZmmRegister *data) BX_CPP_AttrRegparmN(3);
BX_SMF void write_virtual_zmmword_aligned(unsigned seg, bx_address off, const BxPackedZmmRegister *data) BX_CPP_AttrRegparmN(3);
#endif
#endif
BX_SMF Bit8u read_RMW_linear_byte(unsigned seg, bx_address offset) BX_CPP_AttrRegparmN(2);

View File

@ -2,7 +2,7 @@
// $Id$
/////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2010-2017 Stanislav Shwartsman
// Copyright (c) 2010-2018 Stanislav Shwartsman
// Written by Stanislav Shwartsman [sshwarts at sourceforge net]
//
// This library is free software; you can redistribute it and/or

View File

@ -2,7 +2,7 @@
// $Id$
/////////////////////////////////////////////////////////////////////////
//
// Copyright (C) 2016-2017 The Bochs Project
// Copyright (C) 2016-2018 The Bochs Project
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
@ -54,6 +54,7 @@ enum {
BX_ISA_POPCNT, /* POPCNT instruction */
BX_ISA_MONITOR_MWAIT, /* MONITOR/MWAIT instruction */
BX_ISA_MONITORX_MWAITX, /* MONITORX/MWAITX instruction (AMD) */
BX_ISA_WAITPKG, /* TPAUSE/UMONITOR/UMWAIT instructions */
BX_ISA_VMX, /* VMX instruction */
BX_ISA_SMX, /* SMX instruction */
BX_ISA_LONG_MODE, /* Long Mode (x86-64) support */
@ -119,6 +120,8 @@ enum {
BX_ISA_RDPID, /* RDPID Support */
BX_ISA_TCE, /* Translation Cache Extensions (TCE) support (AMD) */
BX_ISA_CLZERO, /* CLZERO instruction support (AMD) */
BX_ISA_MOVDIRI, /* MOVDIRI instruction support */
BX_ISA_MOVDIRI64, /* MOVDIRI64 instruction support */
BX_ISA_EXTENSION_LAST
};

View File

@ -308,30 +308,11 @@ void BX_CPP_AttrRegparmN(1) BX_CPU_C::CLZERO(bxInstruction_c *i)
{
bx_address eaddr = RAX & ~BX_CONST64(CACHE_LINE_SIZE-1) & i->asize_mask();
#if BX_SUPPORT_EVEX
BxPackedZmmRegister zmmzero;
BxPackedZmmRegister zmmzero; // zmm is always made available even if EVEX is not compiled in
zmmzero.clear();
for (unsigned n=0; n<CACHE_LINE_SIZE; n += 64) {
write_virtual_zmmword(i->seg(), eaddr+n, &zmmzero);
}
#elif BX_SUPPORT_AVX
BxPackedYmmRegister ymmzero;
ymmzero.clear();
for (unsigned n=0; n<CACHE_LINE_SIZE; n += 32) {
write_virtual_ymmword(i->seg(), eaddr+n, &ymmzero);
}
#elif BX_CPU_LEVEL >= 6
BxPackedXmmRegister xmmzero;
xmmzero.clear();
for (unsigned n=0; n<CACHE_LINE_SIZE; n += 16) {
write_virtual_xmmword(i->seg(), eaddr+n, &xmmzero);
}
#else
Bit64u val_64 = 0;
for (unsigned n=0; n<CACHE_LINE_SIZE; n += 8) {
write_virtual_qword(i->seg(), eaddr+n, val_64);
}
#endif
}
void BX_CPU_C::handleCpuModeChange(void)

View File

@ -2,7 +2,7 @@
// $Id$
/////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2003-2017 Stanislav Shwartsman
// Copyright (c) 2003-2018 Stanislav Shwartsman
// Written by Stanislav Shwartsman [sshwarts at sourceforge net]
//
// This library is free software; you can redistribute it and/or
@ -65,8 +65,6 @@ union bx_xmm_reg_t {
/* AVX REGISTER */
#if BX_SUPPORT_AVX
typedef
#if defined(_MSC_VER) && (_MSC_VER>=1300)
__declspec(align(32))
@ -110,10 +108,6 @@ union bx_ymm_reg_t {
#define ymm128(i) ymm_v128[(i)]
#endif
#endif
#if BX_SUPPORT_EVEX
typedef
#if defined(_MSC_VER) && (_MSC_VER>=1300)
__declspec(align(64))
@ -160,8 +154,6 @@ union bx_zmm_reg_t {
#define zmm256(i) zmm_v256[(i)]
#endif
#endif
#if BX_SUPPORT_EVEX
# define vmm64s(i) zmm64s(i)
# define vmm32s(i) zmm32s(i)