Implementation of the AVX-512 compressed displacement (disp8*N) feature, which is required for AVX-512 emulation correctness (first step). TODO: fix the rest of the EVEX opcodes.
This commit is contained in:
parent
4e7a0e06f8
commit
9613e4b402
@ -4114,6 +4114,9 @@ public: // for now...
|
||||
BX_SMF int fetchDecode64(const Bit8u *fetchPtr, Bit32u fetchModeMask, bxInstruction_c *i, unsigned remainingInPage) BX_CPP_AttrRegparmN(3);
|
||||
#endif
|
||||
BX_SMF void boundaryFetch(const Bit8u *fetchPtr, unsigned remainingInPage, bxInstruction_c *);
|
||||
#if BX_SUPPORT_EVEX
|
||||
BX_SMF unsigned evex_displ8_compression(bxInstruction_c *i, unsigned ia_opcode, unsigned type, unsigned vex_w);
|
||||
#endif
|
||||
BX_SMF Bit16u WalkOpcodeTables(const BxOpcodeInfo_t *op, Bit16u &attr, unsigned modrm, unsigned sse_prefix, unsigned osize, unsigned vex_vl, bx_bool vex_w);
|
||||
BX_SMF char* disasm(const Bit8u *opcode, bool is_32, bool is_64, char *disbufptr, bxInstruction_c *i, bx_address cs_base = 0, bx_address rip = 0);
|
||||
|
||||
|
@ -236,20 +236,21 @@ char* disasm(char *disbufptr, const bxInstruction_c *i, bx_address cs_base, bx_a
|
||||
unsigned src = (unsigned) BxOpcodesTable[ia_opcode].src[n];
|
||||
unsigned src_type = src >> 3;
|
||||
unsigned src_index = src & 0x7;
|
||||
if (! src_type && src != BX_SRC_RM) continue;
|
||||
if (! src_type && src != BX_SRC_RM && src != BX_SRC_EVEX_RM) continue;
|
||||
if (srcs_used++ > 0)
|
||||
disbufptr = dis_sprintf(disbufptr, ", ");
|
||||
|
||||
if (! i->modC0() && (src_index == BX_SRC_RM || src_index == BX_SRC_VSIB)) {
|
||||
if (! i->modC0() && (src_index == BX_SRC_RM || src_index == BX_SRC_EVEX_RM || src_index == BX_SRC_VSIB)) {
|
||||
disbufptr = resolve_memref(disbufptr, i, src_index);
|
||||
#if BX_SUPPORT_EVEX
|
||||
// EVEX.z is ignored for memory destination forms
|
||||
if (n == 0 && src_type == BX_VMM_REG && i->opmask()) {
|
||||
if (n == 0 && (src_index == BX_SRC_EVEX_RM || src_type == BX_VMM_REG) && i->opmask()) {
|
||||
disbufptr = dis_sprintf(disbufptr, "{k%d}", i->opmask());
|
||||
}
|
||||
#endif
|
||||
}
|
||||
else {
|
||||
if (src_index == BX_SRC_EVEX_RM) src_type = BX_VMM_REG;
|
||||
unsigned srcreg = i->getSrcReg(n);
|
||||
if (src_type < 0x10) {
|
||||
switch(src_type) {
|
||||
|
@ -1289,6 +1289,9 @@ BX_CPU_C::fetchDecode32(const Bit8u *iptr, Bit32u fetchModeMask, bxInstruction_c
|
||||
int had_vex_xop = 0, vvv = -1;
|
||||
bx_bool use_vvv = 0;
|
||||
#endif
|
||||
#if BX_SUPPORT_EVEX
|
||||
bx_bool displ8 = 0;
|
||||
#endif
|
||||
|
||||
os_32 = is_32 = fetchModeMask & BX_FETCH_MODE_IS32_MASK;
|
||||
|
||||
@ -1615,6 +1618,9 @@ fetch_b1:
|
||||
if (remain != 0) {
|
||||
// 8 sign extended to 32
|
||||
i->modRMForm.displ32u = (Bit8s) *iptr++;
|
||||
#if BX_SUPPORT_EVEX
|
||||
displ8 = 1;
|
||||
#endif
|
||||
remain--;
|
||||
goto modrm_done;
|
||||
}
|
||||
@ -1658,6 +1664,9 @@ fetch_b1:
|
||||
if (remain != 0) {
|
||||
// 8 sign extended to 16
|
||||
i->modRMForm.displ16u = (Bit8s) *iptr++;
|
||||
#if BX_SUPPORT_EVEX
|
||||
displ8 = 1;
|
||||
#endif
|
||||
remain--;
|
||||
goto modrm_done;
|
||||
}
|
||||
@ -1854,8 +1863,32 @@ modrm_done:
|
||||
}
|
||||
else {
|
||||
i->setSrcReg(n, (type == BX_VMM_REG) ? BX_VECTOR_TMP_REGISTER : BX_TMP_REGISTER);
|
||||
#if BX_SUPPORT_EVEX
|
||||
if (b1 == 0x62 && type == BX_GPR32 && displ8) {
|
||||
if (i->as32L())
|
||||
i->modRMForm.displ32u *= 4;
|
||||
else
|
||||
i->modRMForm.displ16u *= 4;
|
||||
}
|
||||
#endif
|
||||
}
|
||||
break;
|
||||
#if BX_SUPPORT_EVEX
|
||||
case BX_SRC_EVEX_RM:
|
||||
if (! mod_mem) {
|
||||
i->setSrcReg(n, rm);
|
||||
}
|
||||
else {
|
||||
i->setSrcReg(n, BX_VECTOR_TMP_REGISTER);
|
||||
if (displ8) {
|
||||
if (i->as32L())
|
||||
i->modRMForm.displ32u *= evex_displ8_compression(i, ia_opcode, type, vex_w);
|
||||
else
|
||||
i->modRMForm.displ16u *= evex_displ8_compression(i, ia_opcode, type, vex_w);
|
||||
}
|
||||
}
|
||||
break;
|
||||
#endif
|
||||
#if BX_SUPPORT_AVX
|
||||
case BX_SRC_VVV:
|
||||
i->setSrcReg(n, vvv);
|
||||
@ -1868,7 +1901,15 @@ modrm_done:
|
||||
if (! i->as32L() || i->sibIndex() == BX_NIL_REGISTER) {
|
||||
ia_opcode = BX_IA_ERROR;
|
||||
}
|
||||
#if BX_SUPPORT_EVEX
|
||||
if (displ8) {
|
||||
if (i->as32L())
|
||||
i->modRMForm.displ32u *= 4 << vex_w;
|
||||
else
|
||||
i->modRMForm.displ16u *= 4 << vex_w;
|
||||
}
|
||||
break;
|
||||
#endif
|
||||
#endif
|
||||
default:
|
||||
BX_PANIC(("fetchdecode32: unknown definition %d for src %d", src, n));
|
||||
@ -1989,6 +2030,53 @@ decode_done:
|
||||
return(0);
|
||||
}
|
||||
|
||||
#if BX_SUPPORT_EVEX
// Compute the EVEX disp8*N compression factor N for a compressed 8-bit
// displacement. Callers multiply the sign-extended 8-bit displacement by the
// returned value (see the BX_SRC_EVEX_RM handling in fetchDecode32/64).
//
//   i         - decoded instruction (vector length and EVEX.b are read from it)
//   ia_opcode - resolved internal opcode, used for per-opcode special cases
//   type      - memory access-width class (BX_VMM_* constant)
//   vex_w     - EVEX.W bit, selects 4- vs 8-byte element size
//
// Returns the scale factor N; 1 when the access class needs no compression.
unsigned BX_CPU_C::evex_displ8_compression(bxInstruction_c *i, unsigned ia_opcode, unsigned type, unsigned vex_w)
{
  // Special case: 128-bit VMOVDDUP uses a fixed scale factor of 8
  // (presumably because it touches only the low 8 bytes of memory).
  if (ia_opcode == BX_IA_V512_VMOVDDUP_VpdWpd && i->getVL() == BX_VL128)
    return 8;

  const unsigned vl = i->getVL();
  const unsigned elem_size = 4 << vex_w; // 4 bytes (W=0) or 8 bytes (W=1)

  switch (type) {
  case BX_VMM_FULL_VECTOR:
    // EVEX.b set: single-element (broadcast-style) access, otherwise whole vector.
    return i->getEvexb() ? elem_size : (16 * vl);

  case BX_VMM_SCALAR:
    return elem_size;

  case BX_VMM_HALF_VECTOR:
    // EVEX.b set: single-element access, otherwise half-width vector.
    return i->getEvexb() ? elem_size : (8 * vl);

  case BX_VMM_QUARTER_VECTOR:
    BX_ASSERT(i->getEvexb());
    return (4 * vl);

  case BX_VMM_OCT_VECTOR:
    BX_ASSERT(i->getEvexb());
    return (2 * vl);

  case BX_VMM_VEC128:
    return 16;

  case BX_VMM_VEC256:
    return 32;
  }

  // Unrecognized access class: no compression.
  return 1;
}
#endif
|
||||
|
||||
Bit16u BX_CPU_C::WalkOpcodeTables(const BxOpcodeInfo_t *OpcodeInfoPtr, Bit16u &attr, unsigned modrm, unsigned sse_prefix, unsigned osize, unsigned vex_vl, bx_bool vex_w)
|
||||
{
|
||||
// Parse mod-nnn-rm and related bytes
|
||||
|
@ -79,7 +79,7 @@ enum {
|
||||
BX_SRC_EAX = 1,
|
||||
BX_SRC_NNN = 2,
|
||||
BX_SRC_RM = 3,
|
||||
// place holder
|
||||
BX_SRC_EVEX_RM = 4,
|
||||
BX_SRC_VVV = 5,
|
||||
BX_SRC_VIB = 6,
|
||||
BX_SRC_VSIB = 7 // gather/scatter vector index
|
||||
@ -103,6 +103,16 @@ enum {
|
||||
BX_BOUNDS_REG = 0xE
|
||||
};
|
||||
|
||||
// Memory access-width classes for EVEX operands; evex_displ8_compression()
// maps each class to its disp8*N scale factor.
enum {
  BX_VMM_FULL_VECTOR    = 0,  // full vector access (16 * VL bytes, or one element when EVEX.b is set)
  BX_VMM_SCALAR         = 1,  // single element: 4 or 8 bytes, selected by EVEX.W
  BX_VMM_HALF_VECTOR    = 2,  // half-width vector (8 * VL bytes, or one element when EVEX.b is set)
  BX_VMM_QUARTER_VECTOR = 3,  // quarter-width vector (4 * VL bytes)
  BX_VMM_OCT_VECTOR     = 4,  // eighth-width vector (2 * VL bytes)
  BX_VMM_VEC128         = 5,  // fixed 128-bit memory access
  BX_VMM_VEC256         = 6   // fixed 256-bit memory access
};
|
||||
|
||||
enum {
|
||||
BX_IMMB = 0x10,
|
||||
BX_IMMW = 0x11,
|
||||
@ -189,6 +199,19 @@ const Bit8u OP_Wpd = BX_FORM_SRC(BX_VMM_REG, BX_SRC_RM);
|
||||
const Bit8u OP_Wss = BX_FORM_SRC(BX_VMM_REG, BX_SRC_RM);
|
||||
const Bit8u OP_Wsd = BX_FORM_SRC(BX_VMM_REG, BX_SRC_RM);
|
||||
|
||||
// EVEX memory operand forms (BX_SRC_EVEX_RM): the first BX_FORM_SRC field is
// the access-width class consumed by evex_displ8_compression() for disp8*N
// displacement scaling.
const Bit8u OP_mVps    = BX_FORM_SRC(BX_VMM_FULL_VECTOR,    BX_SRC_EVEX_RM);
const Bit8u OP_mVpd    = BX_FORM_SRC(BX_VMM_FULL_VECTOR,    BX_SRC_EVEX_RM);
const Bit8u OP_mVdq    = BX_FORM_SRC(BX_VMM_FULL_VECTOR,    BX_SRC_EVEX_RM);
const Bit8u OP_mVss    = BX_FORM_SRC(BX_VMM_SCALAR,         BX_SRC_EVEX_RM);
const Bit8u OP_mVsd    = BX_FORM_SRC(BX_VMM_SCALAR,         BX_SRC_EVEX_RM);
// NOTE(review): dq32/dq64 both map to BX_VMM_SCALAR, so the element width is
// taken from EVEX.W at decode time — confirm W is fixed appropriately for the
// opcodes that use these forms.
const Bit8u OP_mVdq32  = BX_FORM_SRC(BX_VMM_SCALAR,         BX_SRC_EVEX_RM);
const Bit8u OP_mVdq64  = BX_FORM_SRC(BX_VMM_SCALAR,         BX_SRC_EVEX_RM);
const Bit8u OP_mVHV    = BX_FORM_SRC(BX_VMM_HALF_VECTOR,    BX_SRC_EVEX_RM);
const Bit8u OP_mVQV    = BX_FORM_SRC(BX_VMM_QUARTER_VECTOR, BX_SRC_EVEX_RM);
const Bit8u OP_mVOV    = BX_FORM_SRC(BX_VMM_OCT_VECTOR,     BX_SRC_EVEX_RM);
const Bit8u OP_mVdq128 = BX_FORM_SRC(BX_VMM_VEC128,         BX_SRC_EVEX_RM);
const Bit8u OP_mVdq256 = BX_FORM_SRC(BX_VMM_VEC256,         BX_SRC_EVEX_RM);
|
||||
|
||||
const Bit8u OP_Hdq = BX_FORM_SRC(BX_VMM_REG, BX_SRC_VVV);
|
||||
const Bit8u OP_Hps = BX_FORM_SRC(BX_VMM_REG, BX_SRC_VVV);
|
||||
const Bit8u OP_Hpd = BX_FORM_SRC(BX_VMM_REG, BX_SRC_VVV);
|
||||
|
@ -1710,7 +1710,7 @@ BX_CPU_C::fetchDecode64(const Bit8u *iptr, Bit32u fetchModeMask, bxInstruction_c
|
||||
#endif
|
||||
|
||||
#if BX_SUPPORT_EVEX
|
||||
unsigned evex_v = 0;
|
||||
unsigned evex_v = 0, displ8 = 0;
|
||||
#endif
|
||||
|
||||
i->ResolveModrm = 0;
|
||||
@ -2103,6 +2103,9 @@ fetch_b1:
|
||||
if (remain != 0) {
|
||||
// 8 sign extended to 32
|
||||
i->modRMForm.displ32u = (Bit8s) *iptr++;
|
||||
#if BX_SUPPORT_EVEX
|
||||
displ8 = 1;
|
||||
#endif
|
||||
remain--;
|
||||
}
|
||||
else {
|
||||
@ -2320,8 +2323,25 @@ modrm_done:
|
||||
}
|
||||
else {
|
||||
i->setSrcReg(n, (type == BX_VMM_REG) ? BX_VECTOR_TMP_REGISTER : BX_TMP_REGISTER);
|
||||
#if BX_SUPPORT_EVEX
|
||||
if (b1 == 0x62 && displ8) {
|
||||
if (type == BX_GPR32) i->modRMForm.displ32u *= 4;
|
||||
else if (type == BX_GPR64) i->modRMForm.displ32u *= 8;
|
||||
}
|
||||
#endif
|
||||
}
|
||||
break;
|
||||
#if BX_SUPPORT_EVEX
|
||||
case BX_SRC_EVEX_RM:
|
||||
if (! mod_mem) {
|
||||
i->setSrcReg(n, rm);
|
||||
}
|
||||
else {
|
||||
i->setSrcReg(n, BX_VECTOR_TMP_REGISTER);
|
||||
if (displ8) i->modRMForm.displ32u *= evex_displ8_compression(i, ia_opcode, type, vex_w);
|
||||
}
|
||||
break;
|
||||
#endif
|
||||
#if BX_SUPPORT_AVX
|
||||
case BX_SRC_VVV:
|
||||
i->setSrcReg(n, vvv);
|
||||
@ -2344,6 +2364,7 @@ modrm_done:
|
||||
}
|
||||
#if BX_SUPPORT_EVEX
|
||||
i->setSibIndex(i->sibIndex() | evex_v);
|
||||
if (displ8) i->modRMForm.displ32u *= 4 << vex_w;
|
||||
#endif
|
||||
break;
|
||||
#endif
|
||||
|
File diff suppressed because it is too large
Load Diff
Loading…
x
Reference in New Issue
Block a user