/////////////////////////////////////////////////////////////////////////
// $Id$
/////////////////////////////////////////////////////////////////////////
//
//   Copyright (c) 2008-2019 Stanislav Shwartsman
//          Written by Stanislav Shwartsman [sshwarts at sourceforge net]
//
//  This library is free software; you can redistribute it and/or
//  modify it under the terms of the GNU Lesser General Public
//  License as published by the Free Software Foundation; either
//  version 2 of the License, or (at your option) any later version.
//
//  This library is distributed in the hope that it will be useful,
//  but WITHOUT ANY WARRANTY; without even the implied warranty of
//  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
//  Lesser General Public License for more details.
//
//  You should have received a copy of the GNU Lesser General Public
//  License along with this library; if not, write to the Free Software
//  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
//
/////////////////////////////////////////////////////////////////////////

#define NEED_CPU_REG_SHORTCUTS 1
#include "bochs.h"
#include "cpu.h"
#include "msr.h"
#define LOG_THIS BX_CPU_THIS_PTR

#include "decoder/ia_opcodes.h"

const Bit64u XSAVEC_COMPACTION_ENABLED = BX_CONST64(0x8000000000000000);

#if BX_CPU_LEVEL >= 6
extern XSaveRestoreStateHelper xsave_restore[];
#endif

#if BX_USE_CPU_SMF == 0
#define CALL_XSAVE_FN(ptrToFunc) (this->*(ptrToFunc))
#else
#define CALL_XSAVE_FN(ptrToFunc) (ptrToFunc)
#endif
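// Layout of the XSAVE image as used throughout this file (offsets are relative
// to the 64-byte aligned base address, summarized from the constants below):
//
//   bytes   0..511  legacy FXSAVE-compatible area (MXCSR at +24, MXCSR_MASK at +28)
//   bytes 512..519  XSTATE_BV - bitmap of the components present in the image
//   bytes 520..527  XCOMP_BV  - compaction bitmap; bit 63 selects the compacted format
//   bytes 528..575  reserved, must be zero for XRSTOR
//   bytes 576..     extended state components (at CPUID-enumerated offsets in the
//                   standard format, packed sequentially in the compacted format)
//
// Illustrative guest-side usage (not part of this model, shown for context):
//   mov eax, 7       ; RFBM[31:0]  = x87 | SSE | AVX
//   xor edx, edx     ; RFBM[63:32] = 0
//   xsave [mem]      ; mem must be 64-byte aligned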
/* 0F AE /4 */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::XSAVE(bxInstruction_c *i)
{
#if BX_CPU_LEVEL >= 6
  BX_CPU_THIS_PTR prepareXSAVE();

  bool xsaveopt = (i->getIaOpcode() == BX_IA_XSAVEOPT);

  BX_DEBUG(("%s: save processor state XCR0=0x%08x", i->getIaOpcodeNameShort(), BX_CPU_THIS_PTR xcr0.get32()));

  bx_address eaddr = BX_CPU_RESOLVE_ADDR(i);
  bx_address laddr = get_laddr(i->seg(), eaddr);

#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
  if (BX_CPU_THIS_PTR alignment_check()) {
    if (laddr & 0x3) {
      BX_ERROR(("%s: access not aligned to 4-byte causes model-specific #AC(0)", i->getIaOpcodeNameShort()));
      exception(BX_AC_EXCEPTION, 0);
    }
  }
#endif

  if (laddr & 0x3f) {
    BX_ERROR(("%s: access not aligned to 64-byte", i->getIaOpcodeNameShort()));
    exception(BX_GP_EXCEPTION, 0);
  }

  bx_address asize_mask = i->asize_mask();

  Bit64u xstate_bv = read_virtual_qword(i->seg(), (eaddr + 512) & asize_mask);

  Bit32u requested_feature_bitmap = BX_CPU_THIS_PTR xcr0.get32() & EAX;
  Bit32u xinuse = get_xinuse_vector(requested_feature_bitmap);

  /////////////////////////////////////////////////////////////////////////////
  if ((requested_feature_bitmap & BX_XCR0_FPU_MASK) != 0)
  {
    // XSAVEOPT may skip writing a component that is in its initial configuration
    if (! xsaveopt || (xinuse & BX_XCR0_FPU_MASK) != 0)
      xsave_x87_state(i, eaddr);

    if (xinuse & BX_XCR0_FPU_MASK)
      xstate_bv |=  BX_XCR0_FPU_MASK;
    else
      xstate_bv &= ~BX_XCR0_FPU_MASK;
  }

  /////////////////////////////////////////////////////////////////////////////
  if ((requested_feature_bitmap & (BX_XCR0_SSE_MASK | BX_XCR0_YMM_MASK)) != 0)
  {
    // store MXCSR - write cannot cause any boundary cross because XSAVE image is 64-byte aligned
    write_virtual_dword(i->seg(), eaddr + 24, BX_MXCSR_REGISTER);
    write_virtual_dword(i->seg(), eaddr + 28, MXCSR_MASK);
  }

  /////////////////////////////////////////////////////////////////////////////
  for (unsigned feature = xcr0_t::BX_XCR0_SSE_BIT; feature < xcr0_t::BX_XCR0_LAST; feature++)
  {
    Bit32u feature_mask = (1 << feature);

    if ((requested_feature_bitmap & feature_mask) != 0)
    {
      if (! xsave_restore[feature].len) {
        BX_ERROR(("%s: feature #%d requested to save but not implemented !", i->getIaOpcodeNameShort(), feature));
        continue;
      }

      if (! xsaveopt || (xinuse & feature_mask) != 0) {
        BX_ASSERT(xsave_restore[feature].xsave_method);
        CALL_XSAVE_FN(xsave_restore[feature].xsave_method)(i, eaddr+xsave_restore[feature].offset);
      }

      if (xinuse & feature_mask)
        xstate_bv |=  Bit64u(feature_mask);
      else
        xstate_bv &= ~Bit64u(feature_mask);
    }
  }

  // always update header to 'dirty' state
  write_virtual_qword(i->seg(), (eaddr + 512) & asize_mask, xstate_bv);
#endif

  BX_NEXT_INSTR(i);
}
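// XSAVEC and XSAVES below use the compacted format: the requested extended
// components are written back-to-back right after the legacy area and header,
// in increasing order of their state-component bit, instead of at the fixed
// CPUID-enumerated offsets used by XSAVE/XSAVEOPT above. Bit 63 of XCOMP_BV
// (XSAVEC_COMPACTION_ENABLED) marks an image as compacted.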
/* 0F C7 /4 */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::XSAVEC(bxInstruction_c *i)
{
#if BX_CPU_LEVEL >= 6
  BX_CPU_THIS_PTR prepareXSAVE();

  bool xsaves = (i->getIaOpcode() == BX_IA_XSAVES);
  if (xsaves) {
    if (CPL != 0) {
      BX_ERROR(("%s: with CPL != 0", i->getIaOpcodeNameShort()));
      exception(BX_GP_EXCEPTION, 0);
    }

#if BX_SUPPORT_VMX >= 2
    if (BX_CPU_THIS_PTR in_vmx_guest) {
      if (! SECONDARY_VMEXEC_CONTROL(VMX_VM_EXEC_CTRL3_XSAVES_XRSTORS)) {
        BX_ERROR(("%s in VMX guest: not allowed to use instruction !", i->getIaOpcodeNameShort()));
        exception(BX_UD_EXCEPTION, 0);
      }

      VMCS_CACHE *vm = &BX_CPU_THIS_PTR vmcs;
      Bit64u requested_features = (((Bit64u) EDX) << 32) | EAX;
      if (requested_features & BX_CPU_THIS_PTR msr.ia32_xss & vm->xss_exiting_bitmap)
        VMexit_Instruction(i, VMX_VMEXIT_XSAVES);
    }
#endif
  }

  BX_DEBUG(("%s: save processor state XCR0=0x%08x XSS=%08x", i->getIaOpcodeNameShort(), BX_CPU_THIS_PTR xcr0.get32(), BX_CPU_THIS_PTR msr.ia32_xss));

  bx_address eaddr = BX_CPU_RESOLVE_ADDR(i);
  bx_address laddr = get_laddr(i->seg(), eaddr);

#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
  if (BX_CPU_THIS_PTR alignment_check()) {
    if (laddr & 0x3) {
      BX_ERROR(("%s: access not aligned to 4-byte causes model-specific #AC(0)", i->getIaOpcodeNameShort()));
      exception(BX_AC_EXCEPTION, 0);
    }
  }
#endif

  if (laddr & 0x3f) {
    BX_ERROR(("%s: access not aligned to 64-byte", i->getIaOpcodeNameShort()));
    exception(BX_GP_EXCEPTION, 0);
  }

  bx_address asize_mask = i->asize_mask();

  Bit64u xcr0 = (Bit64u) BX_CPU_THIS_PTR xcr0.get32();
  if (xsaves)
    xcr0 |= BX_CPU_THIS_PTR msr.ia32_xss;

  Bit32u requested_feature_bitmap = xcr0 & EAX;
  Bit32u xinuse = get_xinuse_vector(requested_feature_bitmap);
  Bit64u xstate_bv = requested_feature_bitmap & xinuse;
  Bit64u xcomp_bv = requested_feature_bitmap | XSAVEC_COMPACTION_ENABLED;

  if ((requested_feature_bitmap & BX_XCR0_FPU_MASK) != 0)
  {
    if (xinuse & BX_XCR0_FPU_MASK) {
      xsave_x87_state(i, eaddr);
    }
  }

  if ((requested_feature_bitmap & BX_XCR0_SSE_MASK) != 0)
  {
    // write cannot cause any boundary cross because XSAVE image is 64-byte aligned
    write_virtual_dword(i->seg(), eaddr + 24, BX_MXCSR_REGISTER);
    write_virtual_dword(i->seg(), eaddr + 28, MXCSR_MASK);
  }

  Bit32u offset = XSAVE_SSE_STATE_OFFSET;

  /////////////////////////////////////////////////////////////////////////////
  for (unsigned feature = xcr0_t::BX_XCR0_SSE_BIT; feature < xcr0_t::BX_XCR0_LAST; feature++)
  {
    Bit32u feature_mask = (1 << feature);

    if ((requested_feature_bitmap & feature_mask) != 0)
    {
      if (! xsave_restore[feature].len) {
        BX_ERROR(("%s: feature #%d requested to save but not implemented !", i->getIaOpcodeNameShort(), feature));
        continue;
      }

      if (xinuse & feature_mask) {
        BX_ASSERT(xsave_restore[feature].xsave_method);
        CALL_XSAVE_FN(xsave_restore[feature].xsave_method)(i, eaddr+offset);
      }

      // in the compacted format each requested component occupies the next sequential slot
      offset += xsave_restore[feature].len;
    }
  }

  // always update header to 'dirty' state
  write_virtual_qword(i->seg(), (eaddr + 512) & asize_mask, xstate_bv);
  write_virtual_qword(i->seg(), (eaddr + 520) & asize_mask, xcomp_bv);
#endif

  BX_NEXT_INSTR(i);
}
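// XRSTOR restores every component whose bit is set in both RFBM (EDX:EAX
// masked by XCR0, plus IA32_XSS for XRSTORS) and the image's XSTATE_BV;
// components requested in RFBM but clear in XSTATE_BV are reset to their
// initial configuration via the per-component xrstor_init_method.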
/* 0F AE /5 */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::XRSTOR(bxInstruction_c *i)
{
#if BX_CPU_LEVEL >= 6
  BX_CPU_THIS_PTR prepareXSAVE();

  bool xrstors = (i->getIaOpcode() == BX_IA_XRSTORS);
  if (xrstors) {
    if (CPL != 0) {
      BX_ERROR(("%s: with CPL != 0", i->getIaOpcodeNameShort()));
      exception(BX_GP_EXCEPTION, 0);
    }

#if BX_SUPPORT_VMX >= 2
    if (BX_CPU_THIS_PTR in_vmx_guest) {
      if (! SECONDARY_VMEXEC_CONTROL(VMX_VM_EXEC_CTRL3_XSAVES_XRSTORS)) {
        BX_ERROR(("%s in VMX guest: not allowed to use instruction !", i->getIaOpcodeNameShort()));
        exception(BX_UD_EXCEPTION, 0);
      }

      VMCS_CACHE *vm = &BX_CPU_THIS_PTR vmcs;
      Bit64u requested_features = (((Bit64u) EDX) << 32) | EAX;
      if (requested_features & BX_CPU_THIS_PTR msr.ia32_xss & vm->xss_exiting_bitmap)
        VMexit_Instruction(i, VMX_VMEXIT_XRSTORS);
    }
#endif
  }

  BX_DEBUG(("%s: restore processor state XCR0=0x%08x XSS=%08x", i->getIaOpcodeNameShort(), BX_CPU_THIS_PTR xcr0.get32(), BX_CPU_THIS_PTR msr.ia32_xss));

  bx_address eaddr = BX_CPU_RESOLVE_ADDR(i);
  bx_address laddr = get_laddr(i->seg(), eaddr);

#if BX_SUPPORT_ALIGNMENT_CHECK && BX_CPU_LEVEL >= 4
  if (BX_CPU_THIS_PTR alignment_check()) {
    if (laddr & 0x3) {
      BX_ERROR(("%s: access not aligned to 4-byte causes model-specific #AC(0)", i->getIaOpcodeNameShort()));
      exception(BX_AC_EXCEPTION, 0);
    }
  }
#endif

  if (laddr & 0x3f) {
    BX_ERROR(("%s: access not aligned to 64-byte", i->getIaOpcodeNameShort()));
    exception(BX_GP_EXCEPTION, 0);
  }

  bx_address asize_mask = i->asize_mask();

  Bit64u xstate_bv = read_virtual_qword(i->seg(), (eaddr + 512) & asize_mask);
  Bit64u xcomp_bv  = read_virtual_qword(i->seg(), (eaddr + 520) & asize_mask);
  Bit64u header3   = read_virtual_qword(i->seg(), (eaddr + 528) & asize_mask);

  if (header3 != 0) {
    BX_ERROR(("%s: Reserved header3 state is not 0", i->getIaOpcodeNameShort()));
    exception(BX_GP_EXCEPTION, 0);
  }

  bool compaction = (xcomp_bv & XSAVEC_COMPACTION_ENABLED) != 0;

  if (! BX_CPUID_SUPPORT_ISA_EXTENSION(BX_ISA_XSAVEC) || ! compaction) {
    if (xcomp_bv != 0) {
      BX_ERROR(("%s: Reserved header2 state is not 0", i->getIaOpcodeNameShort()));
      exception(BX_GP_EXCEPTION, 0);
    }
  }

  Bit64u xcr0 = (Bit64u) BX_CPU_THIS_PTR xcr0.get32();
  if (xrstors)
    xcr0 |= BX_CPU_THIS_PTR msr.ia32_xss;

  if (! compaction) {
    if (xrstors) {
      BX_ERROR(("XRSTORS requires the compaction bit XCOMP_BV[63] to be set"));
      exception(BX_GP_EXCEPTION, 0);
    }

    if ((~xcr0 & xstate_bv) != 0) {
      BX_ERROR(("%s: Invalid xstate_bv state", i->getIaOpcodeNameShort()));
      exception(BX_GP_EXCEPTION, 0);
    }
  }
  else {
    if ((~xcr0 & xcomp_bv & ~XSAVEC_COMPACTION_ENABLED) != 0) {
      BX_ERROR(("%s: Invalid xcomp_bv state", i->getIaOpcodeNameShort()));
      exception(BX_GP_EXCEPTION, 0);
    }

    if (xstate_bv & ~xcomp_bv) {
      BX_ERROR(("%s: xstate_bv sets a bit which is not in xcomp_bv state", i->getIaOpcodeNameShort()));
      exception(BX_GP_EXCEPTION, 0);
    }

    Bit64u header4 = read_virtual_qword(i->seg(), (eaddr + 536) & asize_mask);
    Bit64u header5 = read_virtual_qword(i->seg(), (eaddr + 544) & asize_mask);
    Bit64u header6 = read_virtual_qword(i->seg(), (eaddr + 552) & asize_mask);
    Bit64u header7 = read_virtual_qword(i->seg(), (eaddr + 560) & asize_mask);
    Bit64u header8 = read_virtual_qword(i->seg(), (eaddr + 568) & asize_mask);

    if (header4 | header5 | header6 | header7 | header8) {
      BX_ERROR(("%s: Reserved header4..header8 state is not 0", i->getIaOpcodeNameShort()));
      exception(BX_GP_EXCEPTION, 0);
    }
  }

  Bit32u requested_feature_bitmap = xcr0 & EAX;

  /////////////////////////////////////////////////////////////////////////////
  if ((requested_feature_bitmap & BX_XCR0_FPU_MASK) != 0)
  {
    if (xstate_bv & BX_XCR0_FPU_MASK)
      xrstor_x87_state(i, eaddr);
    else
      xrstor_init_x87_state();
  }

  /////////////////////////////////////////////////////////////////////////////
  if ((requested_feature_bitmap & BX_XCR0_SSE_MASK) != 0 ||
     ((requested_feature_bitmap & BX_XCR0_YMM_MASK) != 0 && ! compaction))
  {
    // read cannot cause any boundary cross because XSAVE image is 64-byte aligned
    Bit32u new_mxcsr = read_virtual_dword(i->seg(), eaddr + 24);
    if (new_mxcsr & ~MXCSR_MASK)
      exception(BX_GP_EXCEPTION, 0);
    BX_MXCSR_REGISTER = new_mxcsr;
  }

  /////////////////////////////////////////////////////////////////////////////
  if ((requested_feature_bitmap & BX_XCR0_SSE_MASK) != 0)
  {
    if (xstate_bv & BX_XCR0_SSE_MASK)
      xrstor_sse_state(i, eaddr+XSAVE_SSE_STATE_OFFSET);
    else
      xrstor_init_sse_state();
  }

  if (compaction) {
    Bit32u offset = XSAVE_YMM_STATE_OFFSET;

    /////////////////////////////////////////////////////////////////////////////
    for (unsigned feature = xcr0_t::BX_XCR0_YMM_BIT; feature < xcr0_t::BX_XCR0_LAST; feature++)
    {
      Bit32u feature_mask = (1 << feature);

      if ((requested_feature_bitmap & feature_mask) != 0)
      {
        if (! xsave_restore[feature].len) {
          BX_ERROR(("%s: feature #%d requested to restore but not implemented !", i->getIaOpcodeNameShort(), feature));
          continue;
        }

        if (xstate_bv & feature_mask) {
          BX_ASSERT(xsave_restore[feature].xrstor_method);
          CALL_XSAVE_FN(xsave_restore[feature].xrstor_method)(i, eaddr+offset);
        }
        else {
          BX_ASSERT(xsave_restore[feature].xrstor_init_method);
          CALL_XSAVE_FN(xsave_restore[feature].xrstor_init_method)();
        }

        offset += xsave_restore[feature].len;
      }
    }
  }
  else {
    /////////////////////////////////////////////////////////////////////////////
    for (unsigned feature = xcr0_t::BX_XCR0_YMM_BIT; feature < xcr0_t::BX_XCR0_LAST; feature++)
    {
      Bit32u feature_mask = (1 << feature);

      if ((requested_feature_bitmap & feature_mask) != 0)
      {
        if (! xsave_restore[feature].len) {
          BX_ERROR(("%s: feature #%d requested to restore but not implemented !", i->getIaOpcodeNameShort(), feature));
          continue;
        }

        if (xstate_bv & feature_mask) {
          BX_ASSERT(xsave_restore[feature].xrstor_method);
          CALL_XSAVE_FN(xsave_restore[feature].xrstor_method)(i, eaddr+xsave_restore[feature].offset);
        }
        else {
          BX_ASSERT(xsave_restore[feature].xrstor_init_method);
          CALL_XSAVE_FN(xsave_restore[feature].xrstor_init_method)();
        }
      }
    }
  }

#if BX_SUPPORT_PKEYS
  // now apply the effect of changing the PKRU state
  if ((requested_feature_bitmap & BX_XCR0_PKRU_MASK) != 0) {
    set_PKeys(TMP32, BX_CPU_THIS_PTR pkrs);
  }
#endif

#endif // BX_CPU_LEVEL >= 6

  BX_NEXT_INSTR(i);
}
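// The xsave_restore[] dispatch table used above maps each XCR0 state-component
// bit to its size, standard-format offset and handler methods. Its real
// definition lives elsewhere in the CPU model; judging only from the fields
// referenced in this file, an entry looks roughly like the sketch below
// (the layout and method-pointer types here are placeholders, not the actual
// declaration):
//
//   struct XSaveRestoreStateHelper {
//     unsigned len;     // component size in the XSAVE image, 0 if unimplemented
//     unsigned offset;  // component offset in the standard (non-compacted) format
//     // member function pointers:
//     //   xsave_method(i, offset)     - store the component to memory
//     //   xrstor_method(i, offset)    - load the component from memory
//     //   xrstor_init_method()        - reset the component to its initial state
//     //   xstate_in_use_method()      - true if the component left its initial state
//   };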
#if BX_CPU_LEVEL >= 6

// x87 state management //

void BX_CPU_C::xsave_x87_state(bxInstruction_c *i, bx_address offset)
{
  BxPackedXmmRegister xmm;
  bx_address asize_mask = i->asize_mask();

  xmm.xmm16u(0) = BX_CPU_THIS_PTR the_i387.get_control_word();
  xmm.xmm16u(1) = BX_CPU_THIS_PTR the_i387.get_status_word();
  xmm.xmm16u(2) = pack_FPU_TW(BX_CPU_THIS_PTR the_i387.get_tag_word());

  /* x87 FPU Opcode (16 bits) */
  /* The lower 11 bits contain the FPU opcode, upper 5 bits are reserved */
  xmm.xmm16u(3) = BX_CPU_THIS_PTR the_i387.foo;

  /*
   * x87 FPU IP Offset (32/64 bits)
   * The contents of this field differ depending on the current
   * addressing mode (16/32/64 bit) when the FXSAVE instruction was executed:
   *   + 64-bit mode - 64-bit IP offset
   *   + 32-bit mode - 32-bit IP offset
   *   + 16-bit mode - low 16 bits are IP offset; high 16 bits are reserved.
   * x87 CS FPU IP Selector
   *   + 16 bit, in 16/32 bit mode only
   */
#if BX_SUPPORT_X86_64
  if (i->os64L()) {
    xmm.xmm64u(1) = (BX_CPU_THIS_PTR the_i387.fip);
  }
  else
#endif
  {
    xmm.xmm32u(2) = (Bit32u)(BX_CPU_THIS_PTR the_i387.fip);
    xmm.xmm32u(3) = x87_get_FCS();
  }

  write_virtual_xmmword(i->seg(), offset, &xmm);

  /*
   * x87 FPU Instruction Operand (Data) Pointer Offset (32/64 bits)
   * The contents of this field differ depending on the current
   * addressing mode (16/32 bit) when the FXSAVE instruction was executed:
   *   + 64-bit mode - 64-bit offset
   *   + 32-bit mode - 32-bit offset
   *   + 16-bit mode - low 16 bits are offset; high 16 bits are reserved.
   * x87 DS FPU Instruction Operand (Data) Pointer Selector
   *   + 16 bit, in 16/32 bit mode only
   */
#if BX_SUPPORT_X86_64
  // write cannot cause any boundary cross because XSAVE image is 64-byte aligned
  if (i->os64L()) {
    write_virtual_qword(i->seg(), offset + 16, BX_CPU_THIS_PTR the_i387.fdp);
  }
  else
#endif
  {
    write_virtual_dword(i->seg(), offset + 16, (Bit32u) BX_CPU_THIS_PTR the_i387.fdp);
    write_virtual_dword(i->seg(), offset + 20, x87_get_FDS());
  }
  /* do not touch MXCSR state */

  /* store i387 register file */
  for(unsigned index=0; index < 8; index++)
  {
    const floatx80 &fp = BX_READ_FPU_REG(index);

    xmm.xmm64u(0) = fp.fraction;
    xmm.xmm64u(1) = 0;
    xmm.xmm16u(4) = fp.exp;

    write_virtual_xmmword(i->seg(), (offset+index*16+32) & asize_mask, &xmm);
  }
}
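// Each of the eight x87 registers above is an 80-bit extended-precision value
// (64-bit fraction plus 16-bit sign/exponent) stored in a 16-byte slot; the
// remaining 6 bytes of every slot are written as zero.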
void BX_CPU_C::xrstor_x87_state(bxInstruction_c *i, bx_address offset)
{
  BxPackedXmmRegister xmm;
  bx_address asize_mask = i->asize_mask();

  // load FPU state from XSAVE area
  read_virtual_xmmword(i->seg(), offset, &xmm);

  BX_CPU_THIS_PTR the_i387.cwd = xmm.xmm16u(0);
  BX_CPU_THIS_PTR the_i387.swd = xmm.xmm16u(1);
  BX_CPU_THIS_PTR the_i387.tos = (xmm.xmm16u(1) >> 11) & 0x07;

  /* always set bit 6 to '1' */
  BX_CPU_THIS_PTR the_i387.cwd =
    (BX_CPU_THIS_PTR the_i387.cwd & ~FPU_CW_Reserved_Bits) | 0x0040;

  /* Restore x87 FPU Opcode */
  /* The lower 11 bits contain the FPU opcode, upper 5 bits are reserved */
  BX_CPU_THIS_PTR the_i387.foo = xmm.xmm16u(3) & 0x7FF;

  /* Restore x87 FPU IP */
#if BX_SUPPORT_X86_64
  if (i->os64L()) {
    BX_CPU_THIS_PTR the_i387.fip = xmm.xmm64u(1);
    BX_CPU_THIS_PTR the_i387.fcs = 0;
  }
  else
#endif
  {
    BX_CPU_THIS_PTR the_i387.fip = xmm.xmm32u(2);
    BX_CPU_THIS_PTR the_i387.fcs = xmm.xmm16u(6);
  }

  Bit32u tag_byte = xmm.xmmubyte(4);

  /* Restore x87 FPU DP - read cannot cause any boundary cross because XSAVE image is 64-byte aligned */
  read_virtual_xmmword(i->seg(), offset + 16, &xmm);

#if BX_SUPPORT_X86_64
  if (i->os64L()) {
    BX_CPU_THIS_PTR the_i387.fdp = xmm.xmm64u(0);
    BX_CPU_THIS_PTR the_i387.fds = 0;
  }
  else
#endif
  {
    BX_CPU_THIS_PTR the_i387.fdp = xmm.xmm32u(0);
    BX_CPU_THIS_PTR the_i387.fds = xmm.xmm16u(2);
  }

  /* load i387 register file */
  for(unsigned index=0; index < 8; index++)
  {
    floatx80 reg;
    reg.fraction = read_virtual_qword(i->seg(), (offset+index*16+32) & asize_mask);
    reg.exp      = read_virtual_word (i->seg(), (offset+index*16+40) & asize_mask);

    // update tag only if it is not empty
    BX_WRITE_FPU_REGISTER_AND_TAG(reg,
            IS_TAG_EMPTY(index) ? FPU_Tag_Empty : FPU_tagof(reg), index);
  }

  /* Restore floating point tag word - see description for FXRSTOR instruction */
  BX_CPU_THIS_PTR the_i387.twd = unpack_FPU_TW(tag_byte);

  /* check for unmasked exceptions */
  if (FPU_PARTIAL_STATUS & ~FPU_CONTROL_WORD & FPU_CW_Exceptions_Mask) {
    /* set the B and ES bits in the status-word */
    FPU_PARTIAL_STATUS |= FPU_SW_Summary | FPU_SW_Backward;
  }
  else {
    /* clear the B and ES bits in the status-word */
    FPU_PARTIAL_STATUS &= ~(FPU_SW_Summary | FPU_SW_Backward);
  }
}
void BX_CPU_C::xrstor_init_x87_state(void)
{
  // initialize FPU with reset values
  BX_CPU_THIS_PTR the_i387.init();

  for (unsigned index=0;index<8;index++) {
    static floatx80 reg = { 0, 0 };
    BX_FPU_REG(index) = reg;
  }
}

bool BX_CPU_C::xsave_x87_state_xinuse(void)
{
  if (BX_CPU_THIS_PTR the_i387.get_control_word() != 0x037F ||
      BX_CPU_THIS_PTR the_i387.get_status_word() != 0 ||
      BX_CPU_THIS_PTR the_i387.get_tag_word() != 0xFFFF ||
      BX_CPU_THIS_PTR the_i387.foo != 0 ||
      BX_CPU_THIS_PTR the_i387.fip != 0 || BX_CPU_THIS_PTR the_i387.fcs != 0 ||
      BX_CPU_THIS_PTR the_i387.fdp != 0 || BX_CPU_THIS_PTR the_i387.fds != 0) return true;

  for (unsigned index=0;index<8;index++) {
    floatx80 reg = BX_FPU_REG(index);
    if (reg.exp != 0 || reg.fraction != 0) return true;
  }

  return false;
}

// SSE state management //

void BX_CPU_C::xsave_sse_state(bxInstruction_c *i, bx_address offset)
{
  bx_address asize_mask = i->asize_mask();

  /* store XMM register file */
  for(unsigned index=0; index < 16; index++) {
    // save XMM8-XMM15 only in 64-bit mode
    if (index < 8 || long64_mode()) {
      write_virtual_xmmword(i->seg(), (offset + index*16) & asize_mask, &BX_READ_XMM_REG(index));
    }
  }
}

void BX_CPU_C::xrstor_sse_state(bxInstruction_c *i, bx_address offset)
{
  bx_address asize_mask = i->asize_mask();

  // load SSE state from XSAVE area
  for(unsigned index=0; index < 16; index++) {
    // restore XMM8-XMM15 only in 64-bit mode
    if (index < 8 || long64_mode()) {
      read_virtual_xmmword(i->seg(), (offset+index*16) & asize_mask, &BX_READ_XMM_REG(index));
    }
  }
}

void BX_CPU_C::xrstor_init_sse_state(void)
{
  // initialize SSE with reset values
  for(unsigned index=0; index < 16; index++) {
    // set XMM8-XMM15 only in 64-bit mode
    if (index < 8 || long64_mode()) BX_CLEAR_XMM_REG(index);
  }
}

bool BX_CPU_C::xsave_sse_state_xinuse(void)
{
  for(unsigned index=0; index < 16; index++) {
    // check XMM8-XMM15 only in 64-bit mode
    if (index < 8 || long64_mode()) {
      const BxPackedXmmRegister *reg = &BX_XMM_REG(index);
      if (! is_clear(reg)) return true;
    }
  }

  return false;
}
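// Outside 64-bit mode the upper eight XMM/YMM registers are not
// architecturally accessible, so they are neither saved nor restored there,
// matching XSAVE/XRSTOR behavior on hardware, which does not touch
// XMM8..XMM15 outside 64-bit mode.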
#if BX_SUPPORT_AVX

// YMM state management //

void BX_CPU_C::xsave_ymm_state(bxInstruction_c *i, bx_address offset)
{
  bx_address asize_mask = i->asize_mask();

  /* store AVX state */
  for(unsigned index=0; index < 16; index++) {
    // save YMM8-YMM15 only in 64-bit mode
    if (index < 8 || long64_mode()) {
      write_virtual_xmmword(i->seg(), (offset + index*16) & asize_mask, &BX_READ_AVX_REG_LANE(index, 1));
    }
  }
}

void BX_CPU_C::xrstor_ymm_state(bxInstruction_c *i, bx_address offset)
{
  bx_address asize_mask = i->asize_mask();

  // load AVX state from XSAVE area
  for(unsigned index=0; index < 16; index++) {
    // restore YMM8-YMM15 only in 64-bit mode
    if (index < 8 || long64_mode()) {
      read_virtual_xmmword(i->seg(), (offset + index*16) & asize_mask, &BX_READ_AVX_REG_LANE(index, 1));
    }
  }
}

void BX_CPU_C::xrstor_init_ymm_state(void)
{
  // initialize upper part of AVX registers with reset values
  for(unsigned index=0; index < 16; index++) {
    // set YMM8-YMM15 only in 64-bit mode
    if (index < 8 || long64_mode()) BX_CLEAR_AVX_HIGH128(index);
  }
}

bool BX_CPU_C::xsave_ymm_state_xinuse(void)
{
  for(unsigned index=0; index < 16; index++) {
    // check YMM8-YMM15 only in 64-bit mode
    if (index < 8 || long64_mode()) {
      const BxPackedXmmRegister *reg = &BX_READ_AVX_REG_LANE(index, 1);
      if (! is_clear(reg)) return true;
    }
  }

  return false;
}
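// The AVX-512 register state is split across three XSAVE components that are
// handled separately below: the eight 64-bit opmask registers, the upper 256
// bits of ZMM0..ZMM15 (ZMM_Hi256), and the full ZMM16..ZMM31 (Hi16_ZMM).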
#if BX_SUPPORT_EVEX

// Opmask state management //

void BX_CPU_C::xsave_opmask_state(bxInstruction_c *i, bx_address offset)
{
  bx_address asize_mask = i->asize_mask();

  // save OPMASK state to XSAVE area
  for(unsigned index=0; index < 8; index++) {
    write_virtual_qword(i->seg(), (offset+index*8) & asize_mask, BX_READ_OPMASK(index));
  }
}

void BX_CPU_C::xrstor_opmask_state(bxInstruction_c *i, bx_address offset)
{
  bx_address asize_mask = i->asize_mask();

  // load opmask registers from XSAVE area
  for(unsigned index=0; index < 8; index++) {
    Bit64u opmask = read_virtual_qword(i->seg(), (offset+index*8) & asize_mask);
    BX_WRITE_OPMASK(index, opmask);
  }
}

void BX_CPU_C::xrstor_init_opmask_state(void)
{
  // initialize opmask registers with reset values
  for(unsigned index=0; index < 8; index++) {
    BX_WRITE_OPMASK(index, 0);
  }
}

bool BX_CPU_C::xsave_opmask_state_xinuse(void)
{
  for(unsigned index=0; index < 8; index++) {
    if (BX_READ_OPMASK(index)) return true;
  }

  return false;
}

// ZMM_HI256 (upper part of zmm0..zmm15 registers) state management //

// In 64-bit mode, ZMM_Hi256 state is in its initial configuration if each of ZMM0_H-ZMM15_H is 0.
// Outside 64-bit mode, ZMM_Hi256 state is in its initial configuration if each of ZMM0_H-ZMM7_H is 0.
// An execution of XRSTOR or XRSTORS outside 64-bit mode does not update ZMM8_H-ZMM15_H.

void BX_CPU_C::xsave_zmm_hi256_state(bxInstruction_c *i, bx_address offset)
{
  unsigned num_regs = long64_mode() ? 16 : 8;

  bx_address asize_mask = i->asize_mask();

  // save upper part of ZMM registers to XSAVE area
  for(unsigned index=0; index < num_regs; index++) {
    write_virtual_ymmword(i->seg(), (offset+index*32) & asize_mask, &BX_READ_ZMM_REG_HI(index));
  }
}

void BX_CPU_C::xrstor_zmm_hi256_state(bxInstruction_c *i, bx_address offset)
{
  unsigned num_regs = long64_mode() ? 16 : 8;

  bx_address asize_mask = i->asize_mask();

  // load upper part of ZMM registers from XSAVE area
  for(unsigned index=0; index < num_regs; index++) {
    read_virtual_ymmword(i->seg(), (offset+index*32) & asize_mask, &BX_READ_ZMM_REG_HI(index));
  }
}

void BX_CPU_C::xrstor_init_zmm_hi256_state(void)
{
  unsigned num_regs = long64_mode() ? 16 : 8;

  // initialize upper part of ZMM registers with reset values
  for(unsigned index=0; index < num_regs; index++) {
    BX_CLEAR_AVX_HIGH256(index);
  }
}

bool BX_CPU_C::xsave_zmm_hi256_state_xinuse(void)
{
  unsigned num_regs = long64_mode() ? 16 : 8;

  for(unsigned index=0; index < num_regs; index++) {
    for (unsigned n=2; n < 4; n++) {
      const BxPackedXmmRegister *reg = &BX_READ_AVX_REG_LANE(index, n);
      if (! is_clear(reg)) return true;
    }
  }

  return false;
}
// HI_ZMM (zmm16..zmm31) state management //

// In 64-bit mode, Hi16_ZMM state is in its initial configuration if each of ZMM16-ZMM31 is 0.
// Outside 64-bit mode, Hi16_ZMM state is always in its initial configuration.
// An execution of XRSTOR or XRSTORS outside 64-bit mode does not update ZMM16-ZMM31.

void BX_CPU_C::xsave_hi_zmm_state(bxInstruction_c *i, bx_address offset)
{
  if (!long64_mode()) return;

  bx_address asize_mask = i->asize_mask();

  // save high ZMM state to XSAVE area
  for(unsigned index=0; index < 16; index++) {
    write_virtual_zmmword(i->seg(), (offset+index*64) & asize_mask, &BX_READ_AVX_REG(index+16));
  }
}

void BX_CPU_C::xrstor_hi_zmm_state(bxInstruction_c *i, bx_address offset)
{
  if (!long64_mode()) return;

  bx_address asize_mask = i->asize_mask();

  // load high ZMM state from XSAVE area
  for(unsigned index=0; index < 16; index++) {
    read_virtual_zmmword(i->seg(), (offset+index*64) & asize_mask, &BX_READ_AVX_REG(index+16));
  }
}

void BX_CPU_C::xrstor_init_hi_zmm_state(void)
{
  if (!long64_mode()) return;

  // initialize high ZMM registers with reset values
  for(unsigned index=16; index < 32; index++) {
    BX_CLEAR_AVX_REG(index);
  }
}

bool BX_CPU_C::xsave_hi_zmm_state_xinuse(void)
{
  if (!long64_mode()) return true;

  for(unsigned index=16; index < 32; index++) {
    for (unsigned n=0; n < 4; n++) {
      const BxPackedXmmRegister *reg = &BX_READ_AVX_REG_LANE(index, n);
      if (! is_clear(reg)) return true;
    }
  }

  return false;
}
#endif // BX_SUPPORT_EVEX

#endif // BX_SUPPORT_AVX
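// State-component bit assignments behind the BX_XCR0_*_MASK constants used in
// this file (matching the architectural XCR0 layout): bit 0 x87 FPU, bit 1 SSE,
// bit 2 AVX (YMM_Hi128), bits 5..7 AVX-512 (opmask, ZMM_Hi256, Hi16_ZMM) and
// bit 9 PKRU; the CET user/supervisor components below are enabled through
// IA32_XSS rather than XCR0.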
#if BX_SUPPORT_PKEYS

// PKRU state management //

void BX_CPU_C::xsave_pkru_state(bxInstruction_c *i, bx_address offset)
{
  write_virtual_qword(i->seg(), offset, (Bit64u) BX_CPU_THIS_PTR pkru);
}

void BX_CPU_C::xrstor_pkru_state(bxInstruction_c *i, bx_address offset)
{
  // just write the pkru to TMP register for now and don't call set_PKeys;
  // calling it would take immediate effect on all future memory accesses,
  // including the loads of the remaining XRSTOR components
  TMP32 = read_virtual_dword(i->seg(), offset);
}

void BX_CPU_C::xrstor_init_pkru_state(void)
{
  // just write the pkru to TMP register for now and don't call set_PKeys;
  // calling it would take immediate effect on all future memory accesses,
  // including the loads of the remaining XRSTOR components
  TMP32 = 0;
}

bool BX_CPU_C::xsave_pkru_state_xinuse(void)
{
  return (BX_CPU_THIS_PTR pkru != 0);
}
#endif
#if BX_SUPPORT_CET

// CET U state management //

void BX_CPU_C::xsave_cet_u_state(bxInstruction_c *i, bx_address offset)
{
  bx_address asize_mask = i->asize_mask();

  write_virtual_qword(i->seg(), offset, BX_CPU_THIS_PTR msr.ia32_cet_control[1]);
  write_virtual_qword(i->seg(), (offset + 8) & asize_mask, BX_CPU_THIS_PTR msr.ia32_pl_ssp[3]);
}

void BX_CPU_C::xrstor_cet_u_state(bxInstruction_c *i, bx_address offset)
{
  bx_address asize_mask = i->asize_mask();

  Bit64u ctrl = read_virtual_qword(i->seg(), offset);
  Bit64u ia32_pl3_ssp = read_virtual_qword(i->seg(), (offset + 8) & asize_mask);

  // XRSTOR on CET state does all the reserved-bit and canonicality checks like WRMSR would do
  wrmsr(BX_MSR_IA32_U_CET, ctrl);
  wrmsr(BX_MSR_IA32_PL3_SSP, ia32_pl3_ssp);
}

void BX_CPU_C::xrstor_init_cet_u_state(void)
{
  BX_CPU_THIS_PTR msr.ia32_cet_control[1] = 0;
  BX_CPU_THIS_PTR msr.ia32_pl_ssp[3] = 0;
}

bool BX_CPU_C::xsave_cet_u_state_xinuse(void)
{
  return BX_CPU_THIS_PTR msr.ia32_cet_control[1] != 0 ||
         BX_CPU_THIS_PTR msr.ia32_pl_ssp[3] != 0;
}

// CET S state management //

void BX_CPU_C::xsave_cet_s_state(bxInstruction_c *i, bx_address offset)
{
  bx_address asize_mask = i->asize_mask();

  write_virtual_qword(i->seg(), offset, BX_CPU_THIS_PTR msr.ia32_pl_ssp[0]);
  write_virtual_qword(i->seg(), (offset + 8) & asize_mask, BX_CPU_THIS_PTR msr.ia32_pl_ssp[1]);
  write_virtual_qword(i->seg(), (offset + 16) & asize_mask, BX_CPU_THIS_PTR msr.ia32_pl_ssp[2]);
}

void BX_CPU_C::xrstor_cet_s_state(bxInstruction_c *i, bx_address offset)
{
  bx_address asize_mask = i->asize_mask();

  Bit64u ia32_pl0_ssp = read_virtual_qword(i->seg(), offset);
  Bit64u ia32_pl1_ssp = read_virtual_qword(i->seg(), (offset + 8) & asize_mask);
  Bit64u ia32_pl2_ssp = read_virtual_qword(i->seg(), (offset + 16) & asize_mask);

  // XRSTOR on CET state does all the reserved-bit and canonicality checks like WRMSR would do
  wrmsr(BX_MSR_IA32_PL0_SSP, ia32_pl0_ssp);
  wrmsr(BX_MSR_IA32_PL1_SSP, ia32_pl1_ssp);
  wrmsr(BX_MSR_IA32_PL2_SSP, ia32_pl2_ssp);
}

void BX_CPU_C::xrstor_init_cet_s_state(void)
{
  for (unsigned n=0;n<3;n++)
    BX_CPU_THIS_PTR msr.ia32_pl_ssp[n] = 0;
}

bool BX_CPU_C::xsave_cet_s_state_xinuse(void)
{
  for (unsigned n=0;n<3;n++) {
    if (BX_CPU_THIS_PTR msr.ia32_pl_ssp[n] != 0) return true;
  }

  return false;
}
#endif
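// get_xinuse_vector() computes the XINUSE bitmap: XINUSE[i] = 0 means state
// component [i] is in its initial configuration. XSAVEOPT and XSAVEC/XSAVES
// use it to skip writing such components, and XGETBV with ECX = 1 reports it
// to software (see below).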
Bit32u BX_CPU_C::get_xinuse_vector(Bit32u requested_feature_bitmap)
{
  Bit32u xinuse = 0;

  /////////////////////////////////////////////////////////////////////////////
  for (unsigned feature = xcr0_t::BX_XCR0_FPU_BIT; feature < xcr0_t::BX_XCR0_LAST; feature++)
  {
    Bit32u feature_mask = (1 << feature);

    if ((requested_feature_bitmap & feature_mask) != 0)
    {
      if (! xsave_restore[feature].len) {
        BX_ERROR(("get_xinuse_vector(0x%08x): feature #%d requested but not implemented !", requested_feature_bitmap, feature));
        continue;
      }

      BX_ASSERT(xsave_restore[feature].xstate_in_use_method);
      if (CALL_XSAVE_FN(xsave_restore[feature].xstate_in_use_method)())
        xinuse |= feature_mask;
    }
  }

  /////////////////////////////////////////////////////////////////////////////
  if (requested_feature_bitmap & BX_XCR0_SSE_MASK) {
    if (BX_MXCSR_REGISTER != MXCSR_RESET)
      xinuse |= BX_XCR0_SSE_MASK;
  }

  return xinuse;
}

#endif // BX_CPU_LEVEL >= 6
/* 0F 01 D0 */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::XGETBV(bxInstruction_c *i)
{
#if BX_CPU_LEVEL >= 6
  if(! BX_CPU_THIS_PTR cr4.get_OSXSAVE()) {
    BX_ERROR(("XGETBV: OSXSAVE feature is not enabled in CR4!"));
    exception(BX_UD_EXCEPTION, 0);
  }

  // For now only the XCR0 register is hardcoded to be handled; it should
  // take a few years until an extension will be required

  if (ECX != 0) {
    if (ECX == 1 && BX_CPUID_SUPPORT_ISA_EXTENSION(BX_ISA_XSAVEC)) {
      // Executing XGETBV with ECX = 1 returns in EDX:EAX the logical-AND of XCR0
      // and the current value of the XINUSE state-component bitmap.
      // If XINUSE[i]=0, state component [i] is in its initial configuration.
      RDX = 0;
      RAX = get_xinuse_vector(BX_CPU_THIS_PTR xcr0.get32());
    }
    else {
      BX_ERROR(("XGETBV: Invalid XCR%d register", ECX));
      exception(BX_GP_EXCEPTION, 0);
    }
  }
  else {
    RDX = 0;
    RAX = BX_CPU_THIS_PTR xcr0.get32();
  }
#endif

  BX_NEXT_INSTR(i);
}
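// Illustrative only (not part of this model): compiled guest code typically
// queries these registers through the XGETBV intrinsic rather than raw
// opcodes, e.g.
//
//   #include <immintrin.h>
//   unsigned long long xcr0 = _xgetbv(0);  // requires CR4.OSXSAVE = 1, else #UD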
/* 0F 01 D1 */
void BX_CPP_AttrRegparmN(1) BX_CPU_C::XSETBV(bxInstruction_c *i)
{
#if BX_CPU_LEVEL >= 6
  if(! BX_CPU_THIS_PTR cr4.get_OSXSAVE()) {
    BX_ERROR(("XSETBV: OSXSAVE feature is not enabled in CR4!"));
    exception(BX_UD_EXCEPTION, 0);
  }

#if BX_SUPPORT_VMX
  if (BX_CPU_THIS_PTR in_vmx_guest) {
    VMexit(VMX_VMEXIT_XSETBV, 0);
  }
#endif

#if BX_SUPPORT_SVM
  if (BX_CPU_THIS_PTR in_svm_guest) {
    if (SVM_INTERCEPT(SVM_INTERCEPT1_XSETBV)) Svm_Vmexit(SVM_VMEXIT_XSETBV);
  }
#endif

  // CPL is always 3 in vm8086 mode
  if (/* v8086_mode() || */ CPL != 0) {
    BX_ERROR(("XSETBV: The current privilege level is not 0"));
    exception(BX_GP_EXCEPTION, 0);
  }

  // For now only the XCR0 register is hardcoded to be handled; it should
  // take a few years until an extension will be required
  if (ECX != 0) {
    BX_ERROR(("XSETBV: Invalid XCR%d register", ECX));
    exception(BX_GP_EXCEPTION, 0);
  }

  if (EDX != 0 || (EAX & ~BX_CPU_THIS_PTR xcr0_suppmask) != 0 || (EAX & BX_XCR0_FPU_MASK) == 0) {
    BX_ERROR(("XSETBV: Attempt to change reserved bits"));
    exception(BX_GP_EXCEPTION, 0);
  }

#if BX_SUPPORT_AVX
  if ((EAX & (BX_XCR0_YMM_MASK | BX_XCR0_SSE_MASK)) == BX_XCR0_YMM_MASK) {
    BX_ERROR(("XSETBV: Attempt to enable AVX without SSE"));
    exception(BX_GP_EXCEPTION, 0);
  }
#endif

#if BX_SUPPORT_EVEX
  if (EAX & (BX_XCR0_OPMASK_MASK | BX_XCR0_ZMM_HI256_MASK | BX_XCR0_HI_ZMM_MASK)) {
    Bit32u avx512_state_mask = (BX_XCR0_FPU_MASK | BX_XCR0_SSE_MASK | BX_XCR0_YMM_MASK | BX_XCR0_OPMASK_MASK | BX_XCR0_ZMM_HI256_MASK | BX_XCR0_HI_ZMM_MASK);
    if ((EAX & avx512_state_mask) != avx512_state_mask) {
      BX_ERROR(("XSETBV: Illegal attempt to enable AVX-512 state"));
      exception(BX_GP_EXCEPTION, 0);
    }
  }
#endif

  BX_CPU_THIS_PTR xcr0.set32(EAX);

#if BX_SUPPORT_AVX
  handleAvxModeChange();
#endif

#endif // BX_CPU_LEVEL >= 6

  BX_NEXT_TRACE(i);
}