commit 85743f54fa

Merge tag 'for-upstream' of https://gitlab.com/bonzini/qemu into staging

* i386: fix issue with cache topology passthrough
* scsi-disk: migrate emulated requests
* i386/sev: fix Coverity issues
* i386/tcg: more conversions to new decoder

# -----BEGIN PGP SIGNATURE-----
#
# iQFIBAABCAAyFiEE8TM4V0tmI4mGbHaCv/vSX3jHroMFAmZv6kMUHHBib256aW5p
# QHJlZGhhdC5jb20ACgkQv/vSX3jHroOn4Af/evnpsae1fm8may1NQmmezKiks/4X
# cR0GaQ7w75Oas05jKsG7Xnrq3Vn6p5wllf3Wf00p7F1iJX18azY9rQgIsUVUgVem
# /EIZk1eM6+mDxuIG0taPxc5Aw3cfIBWAjUmzsXrSr55e/wyiIxZCeUo2zk8Il+iL
# Z4ceNzY5PZzc2Fl10D3cGs/+ynfiDM53ucwe3ve2T6NrxEVfKQPp5jkIUkBUba6z
# zM5O4Q5KTEZYVth1gbDTB/uUJLUFjQ12kCQfRCNX+bEPDHwARr0UWr/Oxtz0jZSd
# FvXohz7tI+v+ph0xHyE4tEFqryvLCII1td2ohTAYZZXNGkjK6XZildngBw==
# =m4BE
# -----END PGP SIGNATURE-----
# gpg: Signature made Mon 17 Jun 2024 12:48:19 AM PDT
# gpg:                using RSA key F13338574B662389866C7682BFFBD25F78C7AE83
# gpg:                issuer "pbonzini@redhat.com"
# gpg: Good signature from "Paolo Bonzini <bonzini@gnu.org>" [full]
# gpg:                 aka "Paolo Bonzini <pbonzini@redhat.com>" [full]

* tag 'for-upstream' of https://gitlab.com/bonzini/qemu: (25 commits)
  target/i386: SEV: do not assume machine->cgs is SEV
  target/i386: convert CMPXCHG to new decoder
  target/i386: convert XADD to new decoder
  target/i386: convert LZCNT/TZCNT/BSF/BSR/POPCNT to new decoder
  target/i386: convert SHLD/SHRD to new decoder
  target/i386: adapt gen_shift_count for SHLD/SHRD
  target/i386: pull load/writeback out of gen_shiftd_rm_T1
  target/i386: convert non-grouped, helper-based 2-byte opcodes
  target/i386: split X86_CHECK_prot into PE and VM86 checks
  target/i386: finish converting 0F AE to the new decoder
  target/i386: fix bad sorting of entries in the 0F table
  target/i386: replace read_crN helper with read_cr8
  target/i386: convert MOV from/to CR and DR to new decoder
  target/i386: fix processing of intercept 0 (read CR0)
  target/i386: replace NoSeg special with NoLoadEA
  target/i386: change X86_ENTRYwr to use T0, use it for moves
  target/i386: change X86_ENTRYr to use T0
  target/i386: put BLS* input in T1, use generic flag writeback
  target/i386: rewrite flags writeback for ADCX/ADOX
  target/i386: remove CPUX86State argument from generator functions
  ...

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
@@ -36,6 +36,7 @@
GlobalProperty hw_compat_9_0[] = {
    {"arm-cpu", "backcompat-cntfrq", "true" },
    {"scsi-disk-base", "migrate-emulated-scsi-request", "false" },
    {"vfio-pci", "skip-vsc-check", "false" },
};
const size_t hw_compat_9_0_len = G_N_ELEMENTS(hw_compat_9_0);
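The hw_compat_9_0 entry above pins "migrate-emulated-scsi-request" to "false" for 9.0-era machine types, so only newer machine types emit the new migration stream format. As an illustration only (a standalone C sketch, not QEMU's actual property machinery), a compat table behaves roughly like this:

#include <stdio.h>
#include <string.h>

typedef struct {
    const char *driver;
    const char *property;
    const char *value;
} CompatProp;

/* Hypothetical table mirroring the hw_compat_9_0 entry above. */
static const CompatProp compat_9_0[] = {
    { "scsi-disk-base", "migrate-emulated-scsi-request", "false" },
};

/* Return the 9.0 compat override for a property, or NULL to keep the
 * device's built-in default. */
static const char *compat_lookup(const char *drv, const char *prop)
{
    size_t n = sizeof(compat_9_0) / sizeof(compat_9_0[0]);
    for (size_t i = 0; i < n; i++) {
        if (!strcmp(compat_9_0[i].driver, drv) &&
            !strcmp(compat_9_0[i].property, prop)) {
            return compat_9_0[i].value;
        }
    }
    return NULL;
}

int main(void)
{
    puts(compat_lookup("scsi-disk-base", "migrate-emulated-scsi-request"));
    return 0;
}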
@@ -114,6 +114,7 @@ struct SCSIDiskState {
     * 0xffff - reserved
     */
    uint16_t rotation_rate;
    bool migrate_emulated_scsi_request;
};

static void scsi_free_request(SCSIRequest *req)
@@ -162,6 +163,15 @@ static void scsi_disk_save_request(QEMUFile *f, SCSIRequest *req)
        }
    }
}

static void scsi_disk_emulate_save_request(QEMUFile *f, SCSIRequest *req)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);

    if (s->migrate_emulated_scsi_request) {
        scsi_disk_save_request(f, req);
    }
}

static void scsi_disk_load_request(QEMUFile *f, SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
@@ -185,6 +195,15 @@ static void scsi_disk_load_request(QEMUFile *f, SCSIRequest *req)
    qemu_iovec_init_external(&r->qiov, &r->iov, 1);
}

static void scsi_disk_emulate_load_request(QEMUFile *f, SCSIRequest *req)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);

    if (s->migrate_emulated_scsi_request) {
        scsi_disk_load_request(f, req);
    }
}

/*
 * scsi_handle_rw_error has two return values.  False means that the error
 * must be ignored, true means that the error has been processed and the
@@ -2606,6 +2625,8 @@ static const SCSIReqOps scsi_disk_emulate_reqops = {
    .read_data    = scsi_disk_emulate_read_data,
    .write_data   = scsi_disk_emulate_write_data,
    .get_buf      = scsi_get_buf,
    .load_request = scsi_disk_emulate_load_request,
    .save_request = scsi_disk_emulate_save_request,
};

static const SCSIReqOps scsi_disk_dma_reqops = {
@@ -3114,7 +3135,8 @@ static const TypeInfo scsi_disk_base_info = {
    DEFINE_PROP_STRING("serial", SCSIDiskState, serial),              \
    DEFINE_PROP_STRING("vendor", SCSIDiskState, vendor),              \
    DEFINE_PROP_STRING("product", SCSIDiskState, product),            \
    DEFINE_PROP_STRING("device_id", SCSIDiskState, device_id)
    DEFINE_PROP_STRING("device_id", SCSIDiskState, device_id),        \
    DEFINE_PROP_BOOL("migrate-emulated-scsi-request", SCSIDiskState, migrate_emulated_scsi_request, true)

static Property scsi_hd_properties[] = {
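Note the symmetry these hooks enforce: save and load are gated by the same property, and a migration only works when source and destination agree on its value, which the compat entry above guarantees for 9.0 machine types. A rough standalone sketch of the gating pattern (illustrative, not the QEMU code):

#include <stdbool.h>
#include <stdio.h>

static bool migrate_emulated;   /* property value, equal on both ends */

static void save_request(void) { puts("emulated request -> stream"); }
static void load_request(void) { puts("emulated request <- stream"); }

static void emulate_save(void) { if (migrate_emulated) { save_request(); } }
static void emulate_load(void) { if (migrate_emulated) { load_request(); } }

int main(void)
{
    migrate_emulated = true;    /* default on 9.1+ machine types */
    emulate_save();
    emulate_load();
    return 0;
}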
@@ -6455,10 +6455,8 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
        if (*eax & 31) {
            int host_vcpus_per_cache = 1 + ((*eax & 0x3FFC000) >> 14);

            if (cores_per_pkg > 1) {
                *eax &= ~0xFC000000;
                *eax |= max_core_ids_in_package(&topo_info) << 26;
            }
            *eax &= ~0xFC000000;
            *eax |= max_core_ids_in_package(&topo_info) << 26;
            if (host_vcpus_per_cache > threads_per_pkg) {
                *eax &= ~0x3FFC000;
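For reference, the bitfields being rewritten above come from CPUID leaf 4 (cache topology): EAX[25:14] holds "logical processors sharing this cache" minus one, and EAX[31:26] holds "addressable core IDs per package" minus one, which is why the code masks with 0x3FFC000 and 0xFC000000. A standalone C sketch of the arithmetic (the example value is made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t eax = 0x1C004143;  /* hypothetical CPUID.04H EAX value */
    unsigned sharing  = 1 + ((eax & 0x3FFC000) >> 14);  /* bits 25:14 */
    unsigned core_ids = 1 + ((eax >> 26) & 0x3F);       /* bits 31:26 */
    printf("vcpus sharing cache: %u, core IDs per package: %u\n",
           sharing, core_ids);
    return 0;
}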
@@ -1260,6 +1260,8 @@ uint64_t x86_cpu_get_supported_feature_word(FeatureWord w,
/* Use a clearer name for this.  */
#define CPU_INTERRUPT_INIT CPU_INTERRUPT_RESET

#define CC_OP_HAS_EFLAGS(op) ((op) >= CC_OP_EFLAGS && (op) <= CC_OP_ADCOX)

/* Instead of computing the condition codes after each x86 instruction,
 * QEMU just stores one operand (called CC_SRC), the result
 * (called CC_DST) and the type of operation (called CC_OP).  When the
@@ -1270,6 +1272,9 @@ uint64_t x86_cpu_get_supported_feature_word(FeatureWord w,
typedef enum {
    CC_OP_DYNAMIC, /* must use dynamic code to get cc_op */
    CC_OP_EFLAGS,  /* all cc are explicitly computed, CC_SRC = flags */
    CC_OP_ADCX,    /* CC_DST = C, CC_SRC = rest.  */
    CC_OP_ADOX,    /* CC_SRC2 = O, CC_SRC = rest.  */
    CC_OP_ADCOX,   /* CC_DST = C, CC_SRC2 = O, CC_SRC = rest.  */

    CC_OP_MULB, /* modify all flags, C, O = (CC_SRC != 0) */
    CC_OP_MULW,
@@ -1326,10 +1331,6 @@ typedef enum {
    CC_OP_BMILGL,
    CC_OP_BMILGQ,

    CC_OP_ADCX,  /* CC_DST = C, CC_SRC = rest.  */
    CC_OP_ADOX,  /* CC_DST = O, CC_SRC = rest.  */
    CC_OP_ADCOX, /* CC_DST = C, CC_SRC2 = O, CC_SRC = rest.  */

    CC_OP_CLR,    /* Z set, all other flags clear.  */
    CC_OP_POPCNT, /* Z via CC_SRC, all other flags clear.  */
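The reordering above is not cosmetic: placing CC_OP_ADCX/ADOX/ADCOX immediately after CC_OP_EFLAGS is what makes the new CC_OP_HAS_EFLAGS() range test work. A minimal standalone sketch of the idea (enum values are illustrative):

#include <stdio.h>

enum cc_op {
    CC_OP_DYNAMIC,
    CC_OP_EFLAGS,
    CC_OP_ADCX,
    CC_OP_ADOX,
    CC_OP_ADCOX,
    CC_OP_MULB,     /* first of the lazily-computed ops */
};

#define CC_OP_HAS_EFLAGS(op) ((op) >= CC_OP_EFLAGS && (op) <= CC_OP_ADCOX)

int main(void)
{
    printf("%d %d\n", CC_OP_HAS_EFLAGS(CC_OP_ADOX),   /* 1 */
           CC_OP_HAS_EFLAGS(CC_OP_MULB));             /* 0 */
    return 0;
}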
@@ -95,7 +95,7 @@ DEF_HELPER_FLAGS_2(monitor, TCG_CALL_NO_WG, void, env, tl)
DEF_HELPER_FLAGS_2(mwait, TCG_CALL_NO_WG, noreturn, env, int)
DEF_HELPER_1(rdmsr, void, env)
DEF_HELPER_1(wrmsr, void, env)
DEF_HELPER_FLAGS_2(read_crN, TCG_CALL_NO_RWG, tl, env, int)
DEF_HELPER_FLAGS_1(read_cr8, TCG_CALL_NO_RWG, tl, env)
DEF_HELPER_FLAGS_3(write_crN, TCG_CALL_NO_RWG, void, env, int, tl)
#endif /* !CONFIG_USER_ONLY */
@@ -587,6 +587,7 @@ static SevCapability *sev_get_capabilities(Error **errp)
    sev_common = SEV_COMMON(MACHINE(qdev_get_machine())->cgs);
    if (!sev_common) {
        error_setg(errp, "SEV is not configured");
        return NULL;
    }

    sev_device = object_property_get_str(OBJECT(sev_common), "sev-device",
@@ -1529,11 +1530,12 @@ int
sev_encrypt_flash(hwaddr gpa, uint8_t *ptr, uint64_t len, Error **errp)
{
    SevCommonState *sev_common = SEV_COMMON(MACHINE(qdev_get_machine())->cgs);
    SevCommonStateClass *klass = SEV_COMMON_GET_CLASS(sev_common);
    SevCommonStateClass *klass;

    if (!sev_common) {
        return 0;
    }
    klass = SEV_COMMON_GET_CLASS(sev_common);

    /* if SEV is in update state then encrypt the data else do nothing */
    if (sev_check_state(sev_common, SEV_STATE_LAUNCH_UPDATE)) {
@@ -1710,7 +1712,9 @@ void sev_es_set_reset_vector(CPUState *cpu)
{
    X86CPU *x86;
    CPUX86State *env;
    SevCommonState *sev_common = SEV_COMMON(MACHINE(qdev_get_machine())->cgs);
    ConfidentialGuestSupport *cgs = MACHINE(qdev_get_machine())->cgs;
    SevCommonState *sev_common = SEV_COMMON(
        object_dynamic_cast(OBJECT(cgs), TYPE_SEV_COMMON));

    /* Only update if we have valid reset information */
    if (!sev_common || !sev_common->reset_data_valid) {
@@ -2165,6 +2169,7 @@ sev_snp_guest_set_id_block(Object *obj, const char *value, Error **errp)
    struct kvm_sev_snp_launch_finish *finish = &sev_snp_guest->kvm_finish_conf;
    gsize len;

    finish->id_block_en = 0;
    g_free(sev_snp_guest->id_block);
    g_free((guchar *)finish->id_block_uaddr);

@@ -2184,7 +2189,7 @@ sev_snp_guest_set_id_block(Object *obj, const char *value, Error **errp)
        return;
    }

    finish->id_block_en = (len) ? 1 : 0;
    finish->id_block_en = 1;
}

static char *
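The Coverity-motivated pattern in these hunks: SEV_COMMON() is a checked QOM cast, which is only safe once machine->cgs is known to be a SEV object, whereas object_dynamic_cast() returns NULL on a type mismatch and can therefore be NULL-checked. A rough standalone analogue (illustrative, not QOM itself):

#include <assert.h>
#include <stdio.h>
#include <string.h>

typedef struct { const char *type; } Object;

/* Like object_dynamic_cast(): NULL when the type does not match. */
static Object *dyn_cast(Object *o, const char *type)
{
    return (o && strcmp(o->type, type) == 0) ? o : NULL;
}

/* Like a checked cast macro: asserts instead of returning NULL. */
static Object *checked_cast(Object *o, const char *type)
{
    Object *r = dyn_cast(o, type);
    assert(r);
    return r;
}

int main(void)
{
    Object other = { "some-other-cgs" };
    printf("dyn_cast -> %p (safe to NULL-check)\n",
           (void *)dyn_cast(&other, "sev-common"));
    (void)checked_cast;   /* would assert on &other */
    return 0;
}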
@@ -151,6 +151,8 @@
    X86_OP_GROUP3(op, op0, s0, 2op, s0, op1, s1, ## __VA_ARGS__)
#define X86_OP_GROUPw(op, op0, s0, ...)                           \
    X86_OP_GROUP3(op, op0, s0, None, None, None, None, ## __VA_ARGS__)
#define X86_OP_GROUPwr(op, op0, s0, op1, s1, ...)                 \
    X86_OP_GROUP3(op, op0, s0, op1, s1, None, None, ## __VA_ARGS__)
#define X86_OP_GROUP0(op, ...)                                    \
    X86_OP_GROUP3(op, None, None, None, None, None, None, ## __VA_ARGS__)

@@ -180,20 +182,20 @@
#define X86_OP_ENTRYrr(op, op0, s0, op1, s1, ...)                 \
    X86_OP_ENTRY3(op, None, None, op0, s0, op1, s1, ## __VA_ARGS__)
#define X86_OP_ENTRYwr(op, op0, s0, op1, s1, ...)                 \
    X86_OP_ENTRY3(op, op0, s0, None, None, op1, s1, ## __VA_ARGS__)
    X86_OP_ENTRY3(op, op0, s0, op1, s1, None, None, ## __VA_ARGS__)
#define X86_OP_ENTRY2(op, op0, s0, op1, s1, ...)                  \
    X86_OP_ENTRY3(op, op0, s0, 2op, s0, op1, s1, ## __VA_ARGS__)
#define X86_OP_ENTRYw(op, op0, s0, ...)                           \
    X86_OP_ENTRY3(op, op0, s0, None, None, None, None, ## __VA_ARGS__)
#define X86_OP_ENTRYr(op, op0, s0, ...)                           \
    X86_OP_ENTRY3(op, None, None, None, None, op0, s0, ## __VA_ARGS__)
    X86_OP_ENTRY3(op, None, None, op0, s0, None, None, ## __VA_ARGS__)
#define X86_OP_ENTRY1(op, op0, s0, ...)                           \
    X86_OP_ENTRY3(op, op0, s0, 2op, s0, None, None, ## __VA_ARGS__)
#define X86_OP_ENTRY0(op, ...)                                    \
    X86_OP_ENTRY3(op, None, None, None, None, None, None, ## __VA_ARGS__)

#define cpuid(feat) .cpuid = X86_FEAT_##feat,
#define noseg .special = X86_SPECIAL_NoSeg,
#define nolea .special = X86_SPECIAL_NoLoadEA,
#define xchg .special = X86_SPECIAL_Locked,
#define lock .special = X86_SPECIAL_HasLock,
#define mmx .special = X86_SPECIAL_MMX,
@@ -221,7 +223,9 @@
#define vex13 .vex_class = 13,

#define chk(a) .check = X86_CHECK_##a,
#define svm(a) .intercept = SVM_EXIT_##a,
#define chk2(a, b) .check = X86_CHECK_##a | X86_CHECK_##b,
#define chk3(a, b, c) .check = X86_CHECK_##a | X86_CHECK_##b | X86_CHECK_##c,
#define svm(a) .intercept = SVM_EXIT_##a, .has_intercept = true,

#define avx2_256 .vex_special = X86_VEX_AVX2_256,

@@ -267,20 +271,41 @@ static inline const X86OpEntry *decode_by_prefix(DisasContext *s, const X86OpEnt

static void decode_group15(DisasContext *s, CPUX86State *env, X86OpEntry *entry, uint8_t *b)
{
    /* only includes ldmxcsr and stmxcsr, because they have AVX variants.  */
    static const X86OpEntry group15_reg[8] = {
        [0] = X86_OP_ENTRYw(RDxxBASE, R,y, cpuid(FSGSBASE) chk(o64) p_f3),
        [1] = X86_OP_ENTRYw(RDxxBASE, R,y, cpuid(FSGSBASE) chk(o64) p_f3),
        [2] = X86_OP_ENTRYr(WRxxBASE, R,y, cpuid(FSGSBASE) chk(o64) p_f3 zextT0),
        [3] = X86_OP_ENTRYr(WRxxBASE, R,y, cpuid(FSGSBASE) chk(o64) p_f3 zextT0),
        [5] = X86_OP_ENTRY0(LFENCE, cpuid(SSE2) p_00),
        [6] = X86_OP_ENTRY0(MFENCE, cpuid(SSE2) p_00),
        [7] = X86_OP_ENTRY0(SFENCE, cpuid(SSE2) p_00),
    };

    static const X86OpEntry group15_mem[8] = {
        [2] = X86_OP_ENTRYr(LDMXCSR, E,d, vex5 chk(VEX128)),
        [3] = X86_OP_ENTRYw(STMXCSR, E,d, vex5 chk(VEX128)),
        [0] = X86_OP_ENTRYw(FXSAVE, M,y, cpuid(FXSR) p_00),
        [1] = X86_OP_ENTRYr(FXRSTOR, M,y, cpuid(FXSR) p_00),
        [2] = X86_OP_ENTRYr(LDMXCSR, E,d, vex5 chk(VEX128) p_00),
        [3] = X86_OP_ENTRYw(STMXCSR, E,d, vex5 chk(VEX128) p_00),
        [4] = X86_OP_ENTRYw(XSAVE, M,y, cpuid(XSAVE) p_00),
        [5] = X86_OP_ENTRYr(XRSTOR, M,y, cpuid(XSAVE) p_00),
        [6] = X86_OP_ENTRYw(XSAVEOPT, M,b, cpuid(XSAVEOPT) p_00),
        [7] = X86_OP_ENTRYw(NOP, M,b, cpuid(CLFLUSH) p_00),
    };

    static const X86OpEntry group15_mem_66[8] = {
        [6] = X86_OP_ENTRYw(NOP, M,b, cpuid(CLWB)),
        [7] = X86_OP_ENTRYw(NOP, M,b, cpuid(CLFLUSHOPT)),
    };

    uint8_t modrm = get_modrm(s, env);
    int op = (modrm >> 3) & 7;

    if ((modrm >> 6) == 3) {
        *entry = group15_reg[(modrm >> 3) & 7];
        *entry = group15_reg[op];
    } else if (s->prefix & PREFIX_DATA) {
        *entry = group15_mem_66[op];
    } else {
        *entry = group15_mem[(modrm >> 3) & 7];
        *entry = group15_mem[op];
    }
}
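For readers new to the decoder: group 15 dispatches on the ModRM byte, whose fields are mod (bits 7:6), an opcode extension (bits 5:3) and rm (bits 2:0); mod == 3 selects the register-form table above, anything else one of the memory-form tables. A standalone sketch of the field extraction:

#include <stdio.h>

int main(void)
{
    unsigned char modrm = 0xE8;     /* binary 11 101 000 */
    int mod = (modrm >> 6) & 3;
    int op  = (modrm >> 3) & 7;     /* index into group15_reg/group15_mem */
    int rm  = modrm & 7;
    printf("mod=%d op=%d rm=%d -> %s form\n",
           mod, op, rm, mod == 3 ? "register" : "memory");
    return 0;
}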
@@ -425,6 +450,50 @@ static void decode_0F7F(DisasContext *s, CPUX86State *env, X86OpEntry *entry, ui
    *entry = *decode_by_prefix(s, opcodes_0F7F);
}

static void decode_0FB8(DisasContext *s, CPUX86State *env, X86OpEntry *entry, uint8_t *b)
{
    static const X86OpEntry popcnt =
        X86_OP_ENTRYwr(POPCNT, G,v, E,v, cpuid(POPCNT) zextT0);

    if (s->prefix & PREFIX_REPZ) {
        *entry = popcnt;
    } else {
        memset(entry, 0, sizeof(*entry));
    }
}

static void decode_0FBC(DisasContext *s, CPUX86State *env, X86OpEntry *entry, uint8_t *b)
{
    /* For BSF, pass 2op as the third operand so that we can use zextT0 */
    static const X86OpEntry opcodes_0FBC[4] = {
        X86_OP_ENTRY3(BSF, G,v, E,v, 2op,v, zextT0),
        X86_OP_ENTRY3(BSF, G,v, E,v, 2op,v, zextT0), /* 0x66 */
        X86_OP_ENTRYwr(TZCNT, G,v, E,v, zextT0),     /* 0xf3 */
        X86_OP_ENTRY3(BSF, G,v, E,v, 2op,v, zextT0), /* 0xf2 */
    };
    if (!(s->cpuid_ext3_features & CPUID_EXT3_ABM)) {
        *entry = opcodes_0FBC[0];
    } else {
        *entry = *decode_by_prefix(s, opcodes_0FBC);
    }
}

static void decode_0FBD(DisasContext *s, CPUX86State *env, X86OpEntry *entry, uint8_t *b)
{
    /* For BSR, pass 2op as the third operand so that we can use zextT0 */
    static const X86OpEntry opcodes_0FBD[4] = {
        X86_OP_ENTRY3(BSR, G,v, E,v, 2op,v, zextT0),
        X86_OP_ENTRY3(BSR, G,v, E,v, 2op,v, zextT0), /* 0x66 */
        X86_OP_ENTRYwr(LZCNT, G,v, E,v, zextT0),     /* 0xf3 */
        X86_OP_ENTRY3(BSR, G,v, E,v, 2op,v, zextT0), /* 0xf2 */
    };
    if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)) {
        *entry = opcodes_0FBD[0];
    } else {
        *entry = *decode_by_prefix(s, opcodes_0FBD);
    }
}

static void decode_0FD6(DisasContext *s, CPUX86State *env, X86OpEntry *entry, uint8_t *b)
{
    static const X86OpEntry movq[4] = {
@@ -612,15 +681,15 @@ static const X86OpEntry opcodes_0F38_00toEF[240] = {
/* five rows for no prefix, 66, F3, F2, 66+F2  */
static const X86OpEntry opcodes_0F38_F0toFF[16][5] = {
    [0] = {
        X86_OP_ENTRY3(MOVBE, G,y, M,y, None,None, cpuid(MOVBE)),
        X86_OP_ENTRY3(MOVBE, G,w, M,w, None,None, cpuid(MOVBE)),
        X86_OP_ENTRYwr(MOVBE, G,y, M,y, cpuid(MOVBE)),
        X86_OP_ENTRYwr(MOVBE, G,w, M,w, cpuid(MOVBE)),
        {},
        X86_OP_ENTRY2(CRC32, G,d, E,b, cpuid(SSE42)),
        X86_OP_ENTRY2(CRC32, G,d, E,b, cpuid(SSE42)),
    },
    [1] = {
        X86_OP_ENTRY3(MOVBE, M,y, G,y, None,None, cpuid(MOVBE)),
        X86_OP_ENTRY3(MOVBE, M,w, G,w, None,None, cpuid(MOVBE)),
        X86_OP_ENTRYwr(MOVBE, M,y, G,y, cpuid(MOVBE)),
        X86_OP_ENTRYwr(MOVBE, M,w, G,w, cpuid(MOVBE)),
        {},
        X86_OP_ENTRY2(CRC32, G,d, E,y, cpuid(SSE42)),
        X86_OP_ENTRY2(CRC32, G,d, E,w, cpuid(SSE42)),
@@ -633,7 +702,7 @@ static const X86OpEntry opcodes_0F38_F0toFF[16][5] = {
        {},
    },
    [3] = {
        X86_OP_GROUP3(group17, B,y, E,y, None,None, vex13 cpuid(BMI1)),
        X86_OP_GROUP3(group17, B,y, None,None, E,y, vex13 cpuid(BMI1)),
        {},
        {},
        {},
@@ -985,14 +1054,30 @@ static void decode_0FE6(DisasContext *s, CPUX86State *env, X86OpEntry *entry, ui
    *entry = *decode_by_prefix(s, opcodes_0FE6);
}
static const X86OpEntry opcodes_0F[256] = {
    [0x0E] = X86_OP_ENTRY0(EMMS, cpuid(3DNOW)), /* femms */
/*
 * These ignore the mod bits (assume (modrm&0xc0)==0xc0), so group the
 * pre-decode tweak here for all MOVs from/to CR and DR.
 *
 * AMD documentation (24594.pdf) and testing of Intel 386 and 486
 * processors all show that the mod bits are assumed to be 1's,
 * regardless of actual values.
 */
static void decode_MOV_CR_DR(DisasContext *s, CPUX86State *env, X86OpEntry *entry, uint8_t *b)
{
    /*
     * 3DNow!'s opcode byte comes *after* modrm and displacements, making it
     * more like an Ib operand.  Dispatch to the right helper in a single gen_*
     * function.
     */
    [0x0F] = X86_OP_ENTRY3(3dnow, P,q, Q,q, I,b, cpuid(3DNOW)),
    get_modrm(s, env);
    s->modrm |= 0xC0;

    entry->gen = gen_MOV;
}

static const X86OpEntry opcodes_0F[256] = {
    [0x02] = X86_OP_ENTRYwr(LAR, G,v, E,w, chk(prot)),
    [0x03] = X86_OP_ENTRYwr(LSL, G,v, E,w, chk(prot)),
    [0x05] = X86_OP_ENTRY0(SYSCALL, chk(o64_intel)),
    [0x06] = X86_OP_ENTRY0(CLTS, chk(cpl0) svm(WRITE_CR0)),
    [0x07] = X86_OP_ENTRY0(SYSRET, chk3(o64_intel, prot, cpl0)),

    [0x10] = X86_OP_GROUP0(0F10),
    [0x11] = X86_OP_GROUP0(0F11),
@@ -1004,6 +1089,22 @@ static const X86OpEntry opcodes_0F[256] = {
    /* Incorrectly listed as Mq,Vq in the manual */
    [0x17] = X86_OP_ENTRY3(VMOVHPx_st, M,q, None,None, V,dq, vex5 p_00_66),

    /*
     * Incorrectly listed as using "d" operand type in the manual.  In reality
     * there's no 16-bit version (like y) and it does not use REX.W (like d64).
     */
    [0x20] = X86_OP_GROUPwr(MOV_CR_DR, R,y_d64, C,y_d64, chk(cpl0) svm(READ_CR0)),
    [0x21] = X86_OP_GROUPwr(MOV_CR_DR, R,y_d64, D,y_d64, chk(cpl0) svm(READ_DR0)),
    [0x22] = X86_OP_GROUPwr(MOV_CR_DR, C,y_d64, R,y_d64, zextT0 chk(cpl0) svm(WRITE_CR0)),
    [0x23] = X86_OP_GROUPwr(MOV_CR_DR, D,y_d64, R,y_d64, zextT0 chk(cpl0) svm(WRITE_DR0)),

    [0x30] = X86_OP_ENTRY0(WRMSR, chk(cpl0)),
    [0x31] = X86_OP_ENTRY0(RDTSC),
    [0x32] = X86_OP_ENTRY0(RDMSR, chk(cpl0)),
    [0x33] = X86_OP_ENTRY0(RDPMC),
    [0x34] = X86_OP_ENTRY0(SYSENTER, chk2(i64_amd, prot_or_vm86)),
    [0x35] = X86_OP_ENTRY0(SYSEXIT, chk3(i64_amd, prot, cpl0)),

    [0x40] = X86_OP_ENTRY2(CMOVcc, G,v, E,v, cpuid(CMOV)),
    [0x41] = X86_OP_ENTRY2(CMOVcc, G,v, E,v, cpuid(CMOV)),
    [0x42] = X86_OP_ENTRY2(CMOVcc, G,v, E,v, cpuid(CMOV)),
@@ -1060,9 +1161,64 @@ static const X86OpEntry opcodes_0F[256] = {

    [0xa0] = X86_OP_ENTRYr(PUSH, FS, w),
    [0xa1] = X86_OP_ENTRYw(POP, FS, w),
    [0xa2] = X86_OP_ENTRY0(CPUID),
    [0xa4] = X86_OP_ENTRY4(SHLD, E,v, 2op,v, G,v),
    [0xa5] = X86_OP_ENTRY3(SHLD, E,v, 2op,v, G,v),

    [0xb0] = X86_OP_ENTRY2(CMPXCHG,E,b, G,b, lock),
    [0xb1] = X86_OP_ENTRY2(CMPXCHG,E,v, G,v, lock),
    [0xb2] = X86_OP_ENTRY3(LSS, G,v, EM,p, None, None),
    [0xb4] = X86_OP_ENTRY3(LFS, G,v, EM,p, None, None),
    [0xb5] = X86_OP_ENTRY3(LGS, G,v, EM,p, None, None),
    [0xb6] = X86_OP_ENTRY3(MOV, G,v, E,b, None, None, zextT0), /* MOVZX */
    [0xb7] = X86_OP_ENTRY3(MOV, G,v, E,w, None, None, zextT0), /* MOVZX */

    [0xc0] = X86_OP_ENTRY2(XADD, E,b, G,b, lock),
    [0xc1] = X86_OP_ENTRY2(XADD, E,v, G,v, lock),
    [0xc2] = X86_OP_ENTRY4(VCMP, V,x, H,x, W,x, vex2_rep3 p_00_66_f3_f2),
    [0xc3] = X86_OP_ENTRY3(MOV, EM,y,G,y, None,None, cpuid(SSE2)), /* MOVNTI */
    [0xc4] = X86_OP_ENTRY4(PINSRW, V,dq,H,dq,E,w, vex5 mmx p_00_66),
    [0xc5] = X86_OP_ENTRY3(PEXTRW, G,d, U,dq,I,b, vex5 mmx p_00_66),
    [0xc6] = X86_OP_ENTRY4(VSHUF, V,x, H,x, W,x, vex4 p_00_66),

    [0xd0] = X86_OP_ENTRY3(VADDSUB,  V,x, H,x, W,x, vex2 cpuid(SSE3) p_66_f2),
    [0xd1] = X86_OP_ENTRY3(PSRLW_r,  V,x, H,x, W,x, vex4 mmx avx2_256 p_00_66),
    [0xd2] = X86_OP_ENTRY3(PSRLD_r,  V,x, H,x, W,x, vex4 mmx avx2_256 p_00_66),
    [0xd3] = X86_OP_ENTRY3(PSRLQ_r,  V,x, H,x, W,x, vex4 mmx avx2_256 p_00_66),
    [0xd4] = X86_OP_ENTRY3(PADDQ,    V,x, H,x, W,x, vex4 mmx avx2_256 p_00_66),
    [0xd5] = X86_OP_ENTRY3(PMULLW,   V,x, H,x, W,x, vex4 mmx avx2_256 p_00_66),
    [0xd6] = X86_OP_GROUP0(0FD6),
    [0xd7] = X86_OP_ENTRY3(PMOVMSKB, G,d, None,None, U,x, vex7 mmx avx2_256 p_00_66),

    [0xe0] = X86_OP_ENTRY3(PAVGB,    V,x, H,x, W,x, vex4 mmx avx2_256 p_00_66),
    [0xe1] = X86_OP_ENTRY3(PSRAW_r,  V,x, H,x, W,x, vex7 mmx avx2_256 p_00_66),
    [0xe2] = X86_OP_ENTRY3(PSRAD_r,  V,x, H,x, W,x, vex7 mmx avx2_256 p_00_66),
    [0xe3] = X86_OP_ENTRY3(PAVGW,    V,x, H,x, W,x, vex4 mmx avx2_256 p_00_66),
    [0xe4] = X86_OP_ENTRY3(PMULHUW,  V,x, H,x, W,x, vex4 mmx avx2_256 p_00_66),
    [0xe5] = X86_OP_ENTRY3(PMULHW,   V,x, H,x, W,x, vex4 mmx avx2_256 p_00_66),
    [0xe6] = X86_OP_GROUP0(0FE6),
    [0xe7] = X86_OP_ENTRY3(MOVDQ,    W,x, None,None, V,x, vex1 mmx p_00_66), /* MOVNTQ/MOVNTDQ */

    [0xf0] = X86_OP_ENTRY3(MOVDQ,    V,x, None,None, WM,x, vex4_unal cpuid(SSE3) p_f2), /* LDDQU */
    [0xf1] = X86_OP_ENTRY3(PSLLW_r,  V,x, H,x, W,x, vex7 mmx avx2_256 p_00_66),
    [0xf2] = X86_OP_ENTRY3(PSLLD_r,  V,x, H,x, W,x, vex7 mmx avx2_256 p_00_66),
    [0xf3] = X86_OP_ENTRY3(PSLLQ_r,  V,x, H,x, W,x, vex7 mmx avx2_256 p_00_66),
    [0xf4] = X86_OP_ENTRY3(PMULUDQ,  V,x, H,x, W,x, vex4 mmx avx2_256 p_00_66),
    [0xf5] = X86_OP_ENTRY3(PMADDWD,  V,x, H,x, W,x, vex4 mmx avx2_256 p_00_66),
    [0xf6] = X86_OP_ENTRY3(PSADBW,   V,x, H,x, W,x, vex4 mmx avx2_256 p_00_66),
    [0xf7] = X86_OP_ENTRY3(MASKMOV,  None,None, V,dq, U,dq, vex4_unal avx2_256 mmx p_00_66),

    [0x08] = X86_OP_ENTRY0(NOP, svm(INVD)),
    [0x09] = X86_OP_ENTRY0(NOP, svm(WBINVD)),
    [0x0b] = X86_OP_ENTRY0(UD),            /* UD2 */
    [0x0d] = X86_OP_ENTRY1(NOP, M,v),      /* 3DNow! prefetch */
    [0x0e] = X86_OP_ENTRY0(EMMS, cpuid(3DNOW)), /* femms */
    /*
     * 3DNow!'s opcode byte comes *after* modrm and displacements, making it
     * more like an Ib operand.  Dispatch to the right helper in a single gen_*
     * function.
     */
    [0x0f] = X86_OP_ENTRY3(3dnow, P,q, Q,q, I,b, cpuid(3DNOW)),

    [0x18] = X86_OP_ENTRY1(NOP, nop,v),    /* prefetch/reserved NOP */
    [0x19] = X86_OP_ENTRY1(NOP, nop,v),    /* reserved NOP */
@@ -1137,6 +1293,9 @@ static const X86OpEntry opcodes_0F[256] = {

    [0xa8] = X86_OP_ENTRYr(PUSH, GS, w),
    [0xa9] = X86_OP_ENTRYw(POP, GS, w),
    [0xaa] = X86_OP_ENTRY0(RSM, chk(smm) svm(RSM)),
    [0xac] = X86_OP_ENTRY4(SHRD, E,v, 2op,v, G,v),
    [0xad] = X86_OP_ENTRY3(SHRD, E,v, 2op,v, G,v),
    [0xae] = X86_OP_GROUP0(group15),
    /*
     * It's slightly more efficient to put Ev operand in T0 and allow gen_IMUL3
@@ -1144,23 +1303,14 @@ static const X86OpEntry opcodes_0F[256] = {
     */
    [0xaf] = X86_OP_ENTRY3(IMUL3, G,v, E,v, 2op,v, sextT0),

    [0xb2] = X86_OP_ENTRY3(LSS, G,v, EM,p, None, None),
    [0xb4] = X86_OP_ENTRY3(LFS, G,v, EM,p, None, None),
    [0xb5] = X86_OP_ENTRY3(LGS, G,v, EM,p, None, None),
    [0xb6] = X86_OP_ENTRY3(MOV, G,v, E,b, None, None, zextT0), /* MOVZX */
    [0xb7] = X86_OP_ENTRY3(MOV, G,v, E,w, None, None, zextT0), /* MOVZX */

    [0xb8] = X86_OP_GROUP0(0FB8),
    /* decoded as modrm, which is visible as a difference between page fault and #UD */
    [0xb9] = X86_OP_ENTRYr(UD, nop,v), /* UD1 */
    [0xbc] = X86_OP_GROUP0(0FBC),
    [0xbd] = X86_OP_GROUP0(0FBD),
    [0xbe] = X86_OP_ENTRY3(MOV, G,v, E,b, None, None, sextT0), /* MOVSX */
    [0xbf] = X86_OP_ENTRY3(MOV, G,v, E,w, None, None, sextT0), /* MOVSX */

    [0xc2] = X86_OP_ENTRY4(VCMP, V,x, H,x, W,x, vex2_rep3 p_00_66_f3_f2),
    [0xc3] = X86_OP_ENTRY3(MOV, EM,y,G,y, None,None, cpuid(SSE2)), /* MOVNTI */
    [0xc4] = X86_OP_ENTRY4(PINSRW, V,dq,H,dq,E,w, vex5 mmx p_00_66),
    [0xc5] = X86_OP_ENTRY3(PEXTRW, G,d, U,dq,I,b, vex5 mmx p_00_66),
    [0xc6] = X86_OP_ENTRY4(VSHUF, V,x, H,x, W,x, vex4 p_00_66),

    [0xc8] = X86_OP_ENTRY1(BSWAP, LoBits,y),
    [0xc9] = X86_OP_ENTRY1(BSWAP, LoBits,y),
    [0xca] = X86_OP_ENTRY1(BSWAP, LoBits,y),
@@ -1170,33 +1320,6 @@ static const X86OpEntry opcodes_0F[256] = {
    [0xce] = X86_OP_ENTRY1(BSWAP, LoBits,y),
    [0xcf] = X86_OP_ENTRY1(BSWAP, LoBits,y),

    [0xd0] = X86_OP_ENTRY3(VADDSUB,  V,x, H,x, W,x, vex2 cpuid(SSE3) p_66_f2),
    [0xd1] = X86_OP_ENTRY3(PSRLW_r,  V,x, H,x, W,x, vex4 mmx avx2_256 p_00_66),
    [0xd2] = X86_OP_ENTRY3(PSRLD_r,  V,x, H,x, W,x, vex4 mmx avx2_256 p_00_66),
    [0xd3] = X86_OP_ENTRY3(PSRLQ_r,  V,x, H,x, W,x, vex4 mmx avx2_256 p_00_66),
    [0xd4] = X86_OP_ENTRY3(PADDQ,    V,x, H,x, W,x, vex4 mmx avx2_256 p_00_66),
    [0xd5] = X86_OP_ENTRY3(PMULLW,   V,x, H,x, W,x, vex4 mmx avx2_256 p_00_66),
    [0xd6] = X86_OP_GROUP0(0FD6),
    [0xd7] = X86_OP_ENTRY3(PMOVMSKB, G,d, None,None, U,x, vex7 mmx avx2_256 p_00_66),

    [0xe0] = X86_OP_ENTRY3(PAVGB,    V,x, H,x, W,x, vex4 mmx avx2_256 p_00_66),
    [0xe1] = X86_OP_ENTRY3(PSRAW_r,  V,x, H,x, W,x, vex7 mmx avx2_256 p_00_66),
    [0xe2] = X86_OP_ENTRY3(PSRAD_r,  V,x, H,x, W,x, vex7 mmx avx2_256 p_00_66),
    [0xe3] = X86_OP_ENTRY3(PAVGW,    V,x, H,x, W,x, vex4 mmx avx2_256 p_00_66),
    [0xe4] = X86_OP_ENTRY3(PMULHUW,  V,x, H,x, W,x, vex4 mmx avx2_256 p_00_66),
    [0xe5] = X86_OP_ENTRY3(PMULHW,   V,x, H,x, W,x, vex4 mmx avx2_256 p_00_66),
    [0xe6] = X86_OP_GROUP0(0FE6),
    [0xe7] = X86_OP_ENTRY3(MOVDQ,    W,x, None,None, V,x, vex1 mmx p_00_66), /* MOVNTQ/MOVNTDQ */

    [0xf0] = X86_OP_ENTRY3(MOVDQ,    V,x, None,None, WM,x, vex4_unal cpuid(SSE3) p_f2), /* LDDQU */
    [0xf1] = X86_OP_ENTRY3(PSLLW_r,  V,x, H,x, W,x, vex7 mmx avx2_256 p_00_66),
    [0xf2] = X86_OP_ENTRY3(PSLLD_r,  V,x, H,x, W,x, vex7 mmx avx2_256 p_00_66),
    [0xf3] = X86_OP_ENTRY3(PSLLQ_r,  V,x, H,x, W,x, vex7 mmx avx2_256 p_00_66),
    [0xf4] = X86_OP_ENTRY3(PMULUDQ,  V,x, H,x, W,x, vex4 mmx avx2_256 p_00_66),
    [0xf5] = X86_OP_ENTRY3(PMADDWD,  V,x, H,x, W,x, vex4 mmx avx2_256 p_00_66),
    [0xf6] = X86_OP_ENTRY3(PSADBW,   V,x, H,x, W,x, vex4 mmx avx2_256 p_00_66),
    [0xf7] = X86_OP_ENTRY3(MASKMOV,  None,None, V,dq, U,dq, vex4_unal avx2_256 mmx p_00_66),

    /* Incorrectly missing from 2-17 */
    [0xd8] = X86_OP_ENTRY3(PSUBUSB, V,x, H,x, W,x, vex4 mmx avx2_256 p_00_66),
    [0xd9] = X86_OP_ENTRY3(PSUBUSW, V,x, H,x, W,x, vex4 mmx avx2_256 p_00_66),
@@ -1335,9 +1458,9 @@ static void decode_group4_5(DisasContext *s, CPUX86State *env, X86OpEntry *entry
    /* 0xff */
    [0x08] = X86_OP_ENTRY1(INC, E,v, lock),
    [0x09] = X86_OP_ENTRY1(DEC, E,v, lock),
    [0x0a] = X86_OP_ENTRY3(CALL_m, None, None, E,f64, None, None, zextT0),
    [0x0a] = X86_OP_ENTRYr(CALL_m, E,f64, zextT0),
    [0x0b] = X86_OP_ENTRYr(CALLF_m, M,p),
    [0x0c] = X86_OP_ENTRY3(JMP_m, None, None, E,f64, None, None, zextT0),
    [0x0c] = X86_OP_ENTRYr(JMP_m, E,f64, zextT0),
    [0x0d] = X86_OP_ENTRYr(JMPF_m, M,p),
    [0x0e] = X86_OP_ENTRYr(PUSH, E,f64),
};
@@ -1586,18 +1709,18 @@ static const X86OpEntry opcodes_root[256] = {
    [0x7E] = X86_OP_ENTRYr(Jcc, J,b),
    [0x7F] = X86_OP_ENTRYr(Jcc, J,b),

    [0x88] = X86_OP_ENTRY3(MOV, E,b, G,b, None, None),
    [0x89] = X86_OP_ENTRY3(MOV, E,v, G,v, None, None),
    [0x8A] = X86_OP_ENTRY3(MOV, G,b, E,b, None, None),
    [0x8B] = X86_OP_ENTRY3(MOV, G,v, E,v, None, None),
    /* Missing in Table A-2: memory destination is always 16-bit.  */
    [0x8C] = X86_OP_ENTRY3(MOV, E,v, S,w, None, None, op0_Mw),
    [0x8D] = X86_OP_ENTRY3(LEA, G,v, M,v, None, None, noseg),
    [0x8E] = X86_OP_ENTRY3(MOV, S,w, E,w, None, None),
    [0x88] = X86_OP_ENTRYwr(MOV, E,b, G,b),
    [0x89] = X86_OP_ENTRYwr(MOV, E,v, G,v),
    [0x8A] = X86_OP_ENTRYwr(MOV, G,b, E,b),
    [0x8B] = X86_OP_ENTRYwr(MOV, G,v, E,v),
    /* Missing in Table A-2: memory destination is always 16-bit.  */
    [0x8C] = X86_OP_ENTRYwr(MOV, E,v, S,w, op0_Mw),
    [0x8D] = X86_OP_ENTRYwr(LEA, G,v, M,v, nolea),
    [0x8E] = X86_OP_ENTRYwr(MOV, S,w, E,w),
    [0x8F] = X86_OP_GROUPw(group1A, E,v),

    [0x98] = X86_OP_ENTRY1(CBW, 0,v), /* rAX */
    [0x99] = X86_OP_ENTRY3(CWD, 2,v, 0,v, None, None), /* rDX, rAX */
    [0x99] = X86_OP_ENTRYwr(CWD, 2,v, 0,v), /* rDX, rAX */
    [0x9A] = X86_OP_ENTRYrr(CALLF, I_unsigned,p, I_unsigned,w, chk(i64)),
    [0x9B] = X86_OP_ENTRY0(WAIT),
    [0x9C] = X86_OP_ENTRY0(PUSHF, chk(vm86_iopl) svm(PUSHF)),
@@ -1607,22 +1730,22 @@ static const X86OpEntry opcodes_root[256] = {

    [0xA8] = X86_OP_ENTRYrr(AND, 0,b, I,b),   /* AL, Ib */
    [0xA9] = X86_OP_ENTRYrr(AND, 0,v, I,z),   /* rAX, Iz */
    [0xAA] = X86_OP_ENTRY3(STOS, Y,b, 0,b, None, None),
    [0xAB] = X86_OP_ENTRY3(STOS, Y,v, 0,v, None, None),
    [0xAA] = X86_OP_ENTRYwr(STOS, Y,b, 0,b),
    [0xAB] = X86_OP_ENTRYwr(STOS, Y,v, 0,v),
    /* Manual writeback because REP LODS (!) has to write EAX/RAX after every LODS.  */
    [0xAC] = X86_OP_ENTRYr(LODS, X,b),
    [0xAD] = X86_OP_ENTRYr(LODS, X,v),
    [0xAE] = X86_OP_ENTRYrr(SCAS, 0,b, Y,b),
    [0xAF] = X86_OP_ENTRYrr(SCAS, 0,v, Y,v),

    [0xB8] = X86_OP_ENTRY3(MOV, LoBits,v, I,v, None, None),
    [0xB9] = X86_OP_ENTRY3(MOV, LoBits,v, I,v, None, None),
    [0xBA] = X86_OP_ENTRY3(MOV, LoBits,v, I,v, None, None),
    [0xBB] = X86_OP_ENTRY3(MOV, LoBits,v, I,v, None, None),
    [0xBC] = X86_OP_ENTRY3(MOV, LoBits,v, I,v, None, None),
    [0xBD] = X86_OP_ENTRY3(MOV, LoBits,v, I,v, None, None),
    [0xBE] = X86_OP_ENTRY3(MOV, LoBits,v, I,v, None, None),
    [0xBF] = X86_OP_ENTRY3(MOV, LoBits,v, I,v, None, None),
    [0xB8] = X86_OP_ENTRYwr(MOV, LoBits,v, I,v),
    [0xB9] = X86_OP_ENTRYwr(MOV, LoBits,v, I,v),
    [0xBA] = X86_OP_ENTRYwr(MOV, LoBits,v, I,v),
    [0xBB] = X86_OP_ENTRYwr(MOV, LoBits,v, I,v),
    [0xBC] = X86_OP_ENTRYwr(MOV, LoBits,v, I,v),
    [0xBD] = X86_OP_ENTRYwr(MOV, LoBits,v, I,v),
    [0xBE] = X86_OP_ENTRYwr(MOV, LoBits,v, I,v),
    [0xBF] = X86_OP_ENTRYwr(MOV, LoBits,v, I,v),

    [0xC8] = X86_OP_ENTRYrr(ENTER, I,w, I,b),
    [0xC9] = X86_OP_ENTRY1(LEAVE, A,d64),
@@ -1725,6 +1848,10 @@ static bool decode_op_size(DisasContext *s, X86OpEntry *e, X86OpSize size, MemOp
        *ot = s->dflag == MO_16 ? MO_32 : s->dflag;
        return true;

    case X86_SIZE_y_d64:  /* Full (not 16-bit) register access */
        *ot = CODE64(s) ? MO_64 : MO_32;
        return true;

    case X86_SIZE_z:  /* 16-bit for 16-bit operand size, else 32-bit */
        *ot = s->dflag == MO_16 ? MO_16 : MO_32;
        return true;
@@ -1802,11 +1929,34 @@ static bool decode_op(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode,

    case X86_TYPE_C:  /* REG in the modrm byte selects a control register */
        op->unit = X86_OP_CR;
        goto get_reg;
        op->n = ((get_modrm(s, env) >> 3) & 7) | REX_R(s);
        if (op->n == 0 && (s->prefix & PREFIX_LOCK) &&
            (s->cpuid_ext3_features & CPUID_EXT3_CR8LEG)) {
            op->n = 8;
            s->prefix &= ~PREFIX_LOCK;
        }
        if (op->n != 0 && op->n != 2 && op->n != 3 && op->n != 4 && op->n != 8) {
            return false;
        }
        if (decode->e.intercept) {
            decode->e.intercept += op->n;
        }
        break;

    case X86_TYPE_D:  /* REG in the modrm byte selects a debug register */
        op->unit = X86_OP_DR;
        goto get_reg;
        op->n = ((get_modrm(s, env) >> 3) & 7) | REX_R(s);
        if (op->n >= 8) {
            /*
             * illegal opcode.  The DR4 and DR5 case is checked in the generated
             * code instead, to save on hflags bits.
             */
            return false;
        }
        if (decode->e.intercept) {
            decode->e.intercept += op->n;
        }
        break;

    case X86_TYPE_G:  /* REG in the modrm byte selects a GPR */
        op->unit = X86_OP_INT;
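The X86_TYPE_C arm above encodes two architectural rules: only CR0, CR2, CR3, CR4 and CR8 exist, and with the AMD CR8LEG feature a LOCK-prefixed MOV to/from CR0 is an alternate encoding of CR8. A standalone sketch of that selection logic (illustrative only):

#include <stdbool.h>
#include <stdio.h>

static bool valid_cr(int n)
{
    return n == 0 || n == 2 || n == 3 || n == 4 || n == 8;
}

int main(void)
{
    int n = 0;                        /* reg field decoded from ModRM */
    bool lock_prefix = true, cr8leg = true;

    if (n == 0 && lock_prefix && cr8leg) {
        n = 8;                        /* LOCK MOV CR0 really means CR8 */
    }
    printf("cr%d valid=%d\n", n, valid_cr(n));
    return 0;
}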
@@ -2047,6 +2197,10 @@ static bool has_cpuid_feature(DisasContext *s, X86CPUIDFeature cpuid)
        return true;
    case X86_FEAT_CMOV:
        return (s->cpuid_features & CPUID_CMOV);
    case X86_FEAT_CLFLUSH:
        return (s->cpuid_features & CPUID_CLFLUSH);
    case X86_FEAT_FXSR:
        return (s->cpuid_features & CPUID_FXSR);
    case X86_FEAT_F16C:
        return (s->cpuid_ext_features & CPUID_EXT_F16C);
    case X86_FEAT_FMA:
@@ -2055,6 +2209,8 @@ static bool has_cpuid_feature(DisasContext *s, X86CPUIDFeature cpuid)
        return (s->cpuid_ext_features & CPUID_EXT_MOVBE);
    case X86_FEAT_PCLMULQDQ:
        return (s->cpuid_ext_features & CPUID_EXT_PCLMULQDQ);
    case X86_FEAT_POPCNT:
        return (s->cpuid_ext_features & CPUID_EXT_POPCNT);
    case X86_FEAT_SSE:
        return (s->cpuid_features & CPUID_SSE);
    case X86_FEAT_SSE2:
@@ -2080,6 +2236,8 @@ static bool has_cpuid_feature(DisasContext *s, X86CPUIDFeature cpuid)

    case X86_FEAT_AVX:
        return (s->cpuid_ext_features & CPUID_EXT_AVX);
    case X86_FEAT_XSAVE:
        return (s->cpuid_ext_features & CPUID_EXT_XSAVE);

    case X86_FEAT_3DNOW:
        return (s->cpuid_ext2_features & CPUID_EXT2_3DNOW);
@@ -2094,11 +2252,20 @@ static bool has_cpuid_feature(DisasContext *s, X86CPUIDFeature cpuid)
        return (s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2);
    case X86_FEAT_AVX2:
        return (s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_AVX2);
    case X86_FEAT_CLFLUSHOPT:
        return (s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLFLUSHOPT);
    case X86_FEAT_CLWB:
        return (s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLWB);
    case X86_FEAT_FSGSBASE:
        return (s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_FSGSBASE);
    case X86_FEAT_SHA_NI:
        return (s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SHA_NI);

    case X86_FEAT_CMPCCXADD:
        return (s->cpuid_7_1_eax_features & CPUID_7_1_EAX_CMPCCXADD);

    case X86_FEAT_XSAVEOPT:
        return (s->cpuid_xsave_features & CPUID_XSAVE_XSAVEOPT);
    }
    g_assert_not_reached();
}
@@ -2428,18 +2595,12 @@ static void disas_insn(DisasContext *s, CPUState *cpu)
    if (b == 0x0f) {
        b = x86_ldub_code(env, s);
        switch (b) {
        case 0x00 ... 0x03: /* mostly privileged instructions */
        case 0x05 ... 0x09:
        case 0x00 ... 0x01: /* mostly privileged instructions */
        case 0x1a ... 0x1b: /* MPX */
        case 0x20 ... 0x23: /* mov from/to CR and DR */
        case 0x30 ... 0x35: /* more privileged instructions */
        case 0xa2 ... 0xa5: /* CPUID, BT, SHLD */
        case 0xaa ... 0xae: /* RSM, SHRD, grp15 */
        case 0xb0 ... 0xb1: /* cmpxchg */
        case 0xa3:          /* bt */
        case 0xab:          /* bts */
        case 0xb3:          /* btr */
        case 0xb8:          /* integer ops */
        case 0xba ... 0xbd: /* integer ops */
        case 0xc0 ... 0xc1: /* xadd */
        case 0xba ... 0xbb: /* grp8, btc */
        case 0xc7:          /* grp9 */
            disas_insn_old(s, cpu, b + 0x100);
            return;
@@ -2466,18 +2627,28 @@ static void disas_insn(DisasContext *s, CPUState *cpu)

    /* Checks that result in #UD come first.  */
    if (decode.e.check) {
        if (decode.e.check & X86_CHECK_i64) {
            if (CODE64(s)) {
        if (CODE64(s)) {
            if (decode.e.check & X86_CHECK_i64) {
                goto illegal_op;
            }
            if ((decode.e.check & X86_CHECK_i64_amd) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1) {
                goto illegal_op;
            }
        } else {
            if (decode.e.check & X86_CHECK_o64) {
                goto illegal_op;
            }
            if ((decode.e.check & X86_CHECK_o64_intel) && env->cpuid_vendor1 == CPUID_VENDOR_INTEL_1) {
                goto illegal_op;
            }
        }
        if (decode.e.check & X86_CHECK_o64) {
            if (!CODE64(s)) {
        if (decode.e.check & X86_CHECK_prot_or_vm86) {
            if (!PE(s)) {
                goto illegal_op;
            }
        }
        if (decode.e.check & X86_CHECK_prot) {
            if (!PE(s) || VM86(s)) {
        if (decode.e.check & X86_CHECK_no_vm86) {
            if (VM86(s)) {
                goto illegal_op;
            }
        }
@@ -2524,11 +2695,6 @@ static void disas_insn(DisasContext *s, CPUState *cpu)
        assert(decode.op[1].unit == X86_OP_INT);
        break;

    case X86_SPECIAL_NoSeg:
        decode.mem.def_seg = -1;
        s->override = -1;
        break;

    case X86_SPECIAL_Op0_Mw:
        assert(decode.op[0].unit == X86_OP_INT);
        if (decode.op[0].has_ea) {
@@ -2556,19 +2722,21 @@ static void disas_insn(DisasContext *s, CPUState *cpu)
     * exceptions if there is no memory operand).  Exceptions are
     * vm86 checks (INTn, IRET, PUSHF/POPF), RSM and XSETBV (!).
     *
     * RSM and XSETBV will be handled in the gen_* functions
     * instead of using chk().
     * XSETBV will check for CPL0 in the gen_* function instead of using chk().
     */
    if (decode.e.check & X86_CHECK_cpl0) {
        if (CPL(s) != 0) {
            goto gp_fault;
        }
    }
    if (decode.e.intercept && unlikely(GUEST(s))) {
    if (decode.e.has_intercept && unlikely(GUEST(s))) {
        gen_helper_svm_check_intercept(tcg_env,
                                       tcg_constant_i32(decode.e.intercept));
    }
    if (decode.e.check) {
        if ((decode.e.check & X86_CHECK_smm) && !(s->flags & HF_SMM_MASK)) {
            goto illegal_op;
        }
        if ((decode.e.check & X86_CHECK_vm86_iopl) && VM86(s)) {
            if (IOPL(s) < 3) {
                goto gp_fault;
@@ -2585,12 +2753,13 @@ static void disas_insn(DisasContext *s, CPUState *cpu)
        gen_helper_enter_mmx(tcg_env);
    }

    if (decode.op[0].has_ea || decode.op[1].has_ea || decode.op[2].has_ea) {
    if (decode.e.special != X86_SPECIAL_NoLoadEA &&
        (decode.op[0].has_ea || decode.op[1].has_ea || decode.op[2].has_ea)) {
        gen_load_ea(s, &decode.mem, decode.e.vex_class == 12);
    }
    if (s->prefix & PREFIX_LOCK) {
        gen_load(s, &decode, 2, s->T1);
        decode.e.gen(s, env, &decode);
        decode.e.gen(s, &decode);
    } else {
        if (decode.op[0].unit == X86_OP_MMX) {
            compute_mmx_offset(&decode.op[0]);
@@ -2599,12 +2768,12 @@ static void disas_insn(DisasContext *s, CPUState *cpu)
        }
        gen_load(s, &decode, 1, s->T0);
        gen_load(s, &decode, 2, s->T1);
        decode.e.gen(s, env, &decode);
        decode.e.gen(s, &decode);
        gen_writeback(s, &decode, 0, s->T0);
    }

    /*
     * Write back flags after last memory access.  Some newer ALU instructions, as
     * Write back flags after last memory access.  Some older ALU instructions, as
     * well as SSE instructions, write flags in the gen_* function, but that can
     * cause incorrect tracking of CC_OP for instructions that write to both memory
     * and flags.
@@ -90,6 +90,7 @@ typedef enum X86OpSize {
    X86_SIZE_w,     /* 16-bit */
    X86_SIZE_x,     /* 128/256-bit, based on operand size */
    X86_SIZE_y,     /* 32/64-bit, based on operand size */
    X86_SIZE_y_d64, /* 32/64-bit, based on 64-bit mode */
    X86_SIZE_z,     /* 16-bit for 16-bit operand size, else 32-bit */
    X86_SIZE_z_f64, /* 32-bit for 32-bit operand size or 64-bit mode, else 16-bit */

@@ -108,12 +109,18 @@ typedef enum X86CPUIDFeature {
    X86_FEAT_AVX2,
    X86_FEAT_BMI1,
    X86_FEAT_BMI2,
    X86_FEAT_CLFLUSH,
    X86_FEAT_CLFLUSHOPT,
    X86_FEAT_CLWB,
    X86_FEAT_CMOV,
    X86_FEAT_CMPCCXADD,
    X86_FEAT_F16C,
    X86_FEAT_FMA,
    X86_FEAT_FSGSBASE,
    X86_FEAT_FXSR,
    X86_FEAT_MOVBE,
    X86_FEAT_PCLMULQDQ,
    X86_FEAT_POPCNT,
    X86_FEAT_SHA_NI,
    X86_FEAT_SSE,
    X86_FEAT_SSE2,
@@ -122,6 +129,8 @@ typedef enum X86CPUIDFeature {
    X86_FEAT_SSE41,
    X86_FEAT_SSE42,
    X86_FEAT_SSE4A,
    X86_FEAT_XSAVE,
    X86_FEAT_XSAVEOPT,
} X86CPUIDFeature;

/* Execution flags */
@@ -142,8 +151,8 @@ typedef enum X86InsnCheck {
    X86_CHECK_i64 = 1,
    X86_CHECK_o64 = 2,

    /* Fault outside protected mode */
    X86_CHECK_prot = 4,
    /* Fault in vm86 mode */
    X86_CHECK_no_vm86 = 4,

    /* Privileged instruction checks */
    X86_CHECK_cpl0 = 8,
@@ -159,6 +168,17 @@ typedef enum X86InsnCheck {

    /* Fault if VEX.W=0 */
    X86_CHECK_W1 = 256,

    /* Fault outside protected mode, possibly including vm86 mode */
    X86_CHECK_prot_or_vm86 = 512,
    X86_CHECK_prot = X86_CHECK_prot_or_vm86 | X86_CHECK_no_vm86,

    /* Fault outside SMM */
    X86_CHECK_smm = 1024,

    /* Vendor-specific checks for Intel/AMD differences */
    X86_CHECK_i64_amd = 2048,
    X86_CHECK_o64_intel = 4096,
} X86InsnCheck;
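The check bits compose: after this change X86_CHECK_prot is literally the OR of X86_CHECK_prot_or_vm86 and X86_CHECK_no_vm86, so an entry such as SYSENTER can require protected mode while remaining legal in vm86 mode. A tiny standalone demonstration of the encoding:

#include <stdio.h>

enum {
    CHECK_no_vm86      = 4,
    CHECK_prot_or_vm86 = 512,
    CHECK_prot         = CHECK_prot_or_vm86 | CHECK_no_vm86,
};

int main(void)
{
    printf("prot=%d, includes no_vm86: %s\n", CHECK_prot,
           (CHECK_prot & CHECK_no_vm86) ? "yes" : "no");
    return 0;
}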
typedef enum X86InsnSpecial {
@@ -170,8 +190,9 @@ typedef enum X86InsnSpecial {
    /* Always locked if it has a memory operand (XCHG) */
    X86_SPECIAL_Locked,

    /* Do not apply segment base to effective address */
    X86_SPECIAL_NoSeg,
    /* Do not load effective address in s->A0 */
    X86_SPECIAL_NoLoadEA,

    /*
     * Rd/Mb or Rd/Mw in the manual: register operand 0 is treated as 32 bits
     * (and writeback zero-extends it to 64 bits if applicable).  PREFIX_DATA
@@ -245,7 +266,7 @@ typedef struct X86DecodedInsn X86DecodedInsn;
typedef void (*X86DecodeFunc)(DisasContext *s, CPUX86State *env, X86OpEntry *entry, uint8_t *b);

/* Code generation function.  */
typedef void (*X86GenFunc)(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode);
typedef void (*X86GenFunc)(DisasContext *s, X86DecodedInsn *decode);

struct X86OpEntry {
    /* Based on the is_decode flags.  */
@@ -271,6 +292,7 @@ struct X86OpEntry {
    unsigned valid_prefix:16;
    unsigned check:16;
    unsigned intercept:8;
    bool has_intercept:1;
    bool is_decode:1;
};
[Diff for one large file suppressed by the web viewer and not shown here.]
@@ -2265,11 +2265,11 @@ void helper_sysexit(CPUX86State *env, int dflag)
target_ulong helper_lsl(CPUX86State *env, target_ulong selector1)
{
    unsigned int limit;
    uint32_t e1, e2, eflags, selector;
    uint32_t e1, e2, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env);
    assert(CC_OP == CC_OP_EFLAGS);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
@@ -2301,22 +2301,22 @@ target_ulong helper_lsl(CPUX86State *env, target_ulong selector1)
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            CC_SRC &= ~CC_Z;
            return 0;
        }
    }
    limit = get_seg_limit(e1, e2);
    CC_SRC = eflags | CC_Z;
    CC_SRC |= CC_Z;
    return limit;
}

target_ulong helper_lar(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    uint32_t e1, e2, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env);
    assert(CC_OP == CC_OP_EFLAGS);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
@@ -2351,11 +2351,11 @@ target_ulong helper_lar(CPUX86State *env, target_ulong selector1)
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            CC_SRC &= ~CC_Z;
            return 0;
        }
    }
    CC_SRC = eflags | CC_Z;
    CC_SRC |= CC_Z;
    return e2 & 0x00f0ff00;
}
@@ -63,23 +63,13 @@ target_ulong helper_inl(CPUX86State *env, uint32_t port)
                        cpu_get_mem_attrs(env), NULL);
}

target_ulong helper_read_crN(CPUX86State *env, int reg)
target_ulong helper_read_cr8(CPUX86State *env)
{
    target_ulong val;

    switch (reg) {
    default:
        val = env->cr[reg];
        break;
    case 8:
        if (!(env->hflags2 & HF2_VINTR_MASK)) {
            val = cpu_get_apic_tpr(env_archcpu(env)->apic_state);
        } else {
            val = env->int_ctl & V_TPR_MASK;
        }
        break;
    if (!(env->hflags2 & HF2_VINTR_MASK)) {
        return cpu_get_apic_tpr(env_archcpu(env)->apic_state);
    } else {
        return env->int_ctl & V_TPR_MASK;
    }
    return val;
}
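What the simplified helper computes: CR8 is an alias of the task-priority register, read from the virtual-interrupt state (V_TPR) when a hypervisor supplies one, otherwise from the local APIC. A standalone sketch of that selection (function shape and values are illustrative):

#include <stdint.h>
#include <stdio.h>

#define V_TPR_MASK 0x0F

static uint64_t read_cr8(int vintr_active, uint64_t int_ctl,
                         uint64_t apic_tpr)
{
    /* virtual TPR when V_INTR is in use, else the APIC TPR */
    return vintr_active ? (int_ctl & V_TPR_MASK) : apic_tpr;
}

int main(void)
{
    printf("%llu\n", (unsigned long long)read_cr8(1, 0x23, 5));  /* 3 */
    return 0;
}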
void helper_write_crN(CPUX86State *env, int reg, target_ulong t0)
@@ -246,10 +246,6 @@ STUB_HELPER(mwait, TCGv_env env, TCGv_i32 pc_ofs)
STUB_HELPER(outb, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
STUB_HELPER(outw, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
STUB_HELPER(outl, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
STUB_HELPER(rdmsr, TCGv_env env)
STUB_HELPER(read_crN, TCGv ret, TCGv_env env, TCGv_i32 reg)
STUB_HELPER(get_dr, TCGv ret, TCGv_env env, TCGv_i32 reg)
STUB_HELPER(set_dr, TCGv_env env, TCGv_i32 reg, TCGv val)
STUB_HELPER(stgi, TCGv_env env)
STUB_HELPER(svm_check_intercept, TCGv_env env, TCGv_i32 type)
STUB_HELPER(vmload, TCGv_env env, TCGv_i32 aflag)
@@ -257,7 +253,6 @@ STUB_HELPER(vmmcall, TCGv_env env)
STUB_HELPER(vmrun, TCGv_env env, TCGv_i32 aflag, TCGv_i32 pc_ofs)
STUB_HELPER(vmsave, TCGv_env env, TCGv_i32 aflag)
STUB_HELPER(write_crN, TCGv_env env, TCGv_i32 reg, TCGv val)
STUB_HELPER(wrmsr, TCGv_env env)
#endif

static void gen_jmp_rel(DisasContext *s, MemOp ot, int diff, int tb_num);
@@ -439,13 +434,6 @@ static inline MemOp mo_stacksize(DisasContext *s)
    return CODE64(s) ? MO_64 : SS32(s) ? MO_32 : MO_16;
}

/* Select size 8 if lsb of B is clear, else OT.  Used for decoding
   byte vs word opcodes.  */
static inline MemOp mo_b_d(int b, MemOp ot)
{
    return b & 1 ? ot : MO_8;
}

/* Compute the result of writing t0 to the OT-sized register REG.
 *
 * If DEST is NULL, store the result into the register and return the
@@ -540,15 +528,6 @@ static inline void gen_op_st_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
    tcg_gen_qemu_st_tl(t0, a0, s->mem_index, idx | MO_LE);
}

static inline void gen_op_st_rm_T0_A0(DisasContext *s, int idx, int d)
{
    if (d == OR_TMP0) {
        gen_op_st_v(s, idx, s->T0, s->A0);
    } else {
        gen_op_mov_reg_v(s, idx, d, s->T0);
    }
}

static void gen_update_eip_next(DisasContext *s)
{
    assert(s->pc_save != -1);
@@ -729,11 +708,6 @@ static TCGv gen_ext_tl(TCGv dst, TCGv src, MemOp size, bool sign)
    return dst;
}

static void gen_extu(MemOp ot, TCGv reg)
{
    gen_ext_tl(reg, reg, ot, false);
}

static void gen_exts(MemOp ot, TCGv reg)
{
    gen_ext_tl(reg, reg, ot, true);
@@ -837,17 +811,6 @@ static void gen_movs(DisasContext *s, MemOp ot)
    gen_op_add_reg(s, s->aflag, R_EDI, dshift);
}

static void gen_op_update1_cc(DisasContext *s)
{
    tcg_gen_mov_tl(cpu_cc_dst, s->T0);
}

static void gen_op_update2_cc(DisasContext *s)
{
    tcg_gen_mov_tl(cpu_cc_src, s->T1);
    tcg_gen_mov_tl(cpu_cc_dst, s->T0);
}

/* compute all eflags to reg */
static void gen_mov_eflags(DisasContext *s, TCGv reg)
{
@@ -1448,64 +1411,11 @@ static bool check_cpl0(DisasContext *s)
    return false;
}

static void gen_shift_flags(DisasContext *s, MemOp ot, TCGv result,
                            TCGv shm1, TCGv count, bool is_right)
{
    TCGv_i32 z32, s32, oldop;
    TCGv z_tl;

    /* Store the results into the CC variables.  If we know that the
       variable must be dead, store unconditionally.  Otherwise we'll
       need to not disrupt the current contents.  */
    z_tl = tcg_constant_tl(0);
    if (cc_op_live[s->cc_op] & USES_CC_DST) {
        tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_dst, count, z_tl,
                           result, cpu_cc_dst);
    } else {
        tcg_gen_mov_tl(cpu_cc_dst, result);
    }
    if (cc_op_live[s->cc_op] & USES_CC_SRC) {
        tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_src, count, z_tl,
                           shm1, cpu_cc_src);
    } else {
        tcg_gen_mov_tl(cpu_cc_src, shm1);
    }

    /* Get the two potential CC_OP values into temporaries.  */
    tcg_gen_movi_i32(s->tmp2_i32, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot);
    if (s->cc_op == CC_OP_DYNAMIC) {
        oldop = cpu_cc_op;
    } else {
        tcg_gen_movi_i32(s->tmp3_i32, s->cc_op);
        oldop = s->tmp3_i32;
    }

    /* Conditionally store the CC_OP value.  */
    z32 = tcg_constant_i32(0);
    s32 = tcg_temp_new_i32();
    tcg_gen_trunc_tl_i32(s32, count);
    tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, s32, z32, s->tmp2_i32, oldop);

    /* The CC_OP value is no longer predictable.  */
    set_cc_op(s, CC_OP_DYNAMIC);
}

/* XXX: add faster immediate case */
static void gen_shiftd_rm_T1(DisasContext *s, MemOp ot, int op1,
                             bool is_right, TCGv count_in)
static void gen_shiftd_rm_T1(DisasContext *s, MemOp ot,
                             bool is_right, TCGv count)
{
    target_ulong mask = (ot == MO_64 ? 63 : 31);
    TCGv count;

    /* load */
    if (op1 == OR_TMP0) {
        gen_op_ld_v(s, ot, s->T0, s->A0);
    } else {
        gen_op_mov_v_reg(s, ot, s->T0, op1);
    }

    count = tcg_temp_new();
    tcg_gen_andi_tl(count, count_in, mask);

    switch (ot) {
    case MO_16:
@@ -1567,11 +1477,6 @@ static void gen_shiftd_rm_T1(DisasContext *s, MemOp ot, int op1,
        tcg_gen_or_tl(s->T0, s->T0, s->T1);
        break;
    }

    /* store */
    gen_op_st_rm_T0_A0(s, ot, op1);

    gen_shift_flags(s, ot, s->T0, s->tmp0, count, is_right);
}
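For context, the double-precision shift that gen_shiftd_rm_T1 emits computes, for SHLD: shift the destination left by count and fill the vacated low bits from the top of the second operand. A plain C sketch of the 32-bit case:

#include <stdint.h>
#include <stdio.h>

static uint32_t shld32(uint32_t dst, uint32_t src, unsigned count)
{
    count &= 31;                       /* hardware masks the count */
    if (count == 0) {
        return dst;
    }
    return (dst << count) | (src >> (32 - count));
}

int main(void)
{
    printf("%08x\n", shld32(0x12345678, 0x9ABCDEF0, 8));  /* 3456789a */
    return 0;
}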
#define X86_MAX_INSN_LENGTH 15
@@ -3081,108 +2986,11 @@ static void disas_insn_old(DisasContext *s, CPUState *cpu, int b)
    CPUX86State *env = cpu_env(cpu);
    int prefixes = s->prefix;
    MemOp dflag = s->dflag;
    int shift;
    MemOp ot;
    int modrm, reg, rm, mod, op, opreg, val;
    int modrm, reg, rm, mod, op, val;

    /* now check op code */
    switch (b) {
        /**************************/
        /* arith & logic */
    case 0x1c0:
    case 0x1c1: /* xadd Ev, Gv */
        ot = mo_b_d(b, dflag);
        modrm = x86_ldub_code(env, s);
        reg = ((modrm >> 3) & 7) | REX_R(s);
        mod = (modrm >> 6) & 3;
        gen_op_mov_v_reg(s, ot, s->T0, reg);
        if (mod == 3) {
            rm = (modrm & 7) | REX_B(s);
            gen_op_mov_v_reg(s, ot, s->T1, rm);
            tcg_gen_add_tl(s->T0, s->T0, s->T1);
            gen_op_mov_reg_v(s, ot, reg, s->T1);
            gen_op_mov_reg_v(s, ot, rm, s->T0);
        } else {
            gen_lea_modrm(env, s, modrm);
            if (s->prefix & PREFIX_LOCK) {
                tcg_gen_atomic_fetch_add_tl(s->T1, s->A0, s->T0,
                                            s->mem_index, ot | MO_LE);
                tcg_gen_add_tl(s->T0, s->T0, s->T1);
            } else {
                gen_op_ld_v(s, ot, s->T1, s->A0);
                tcg_gen_add_tl(s->T0, s->T0, s->T1);
                gen_op_st_v(s, ot, s->T0, s->A0);
            }
            gen_op_mov_reg_v(s, ot, reg, s->T1);
        }
        gen_op_update2_cc(s);
        set_cc_op(s, CC_OP_ADDB + ot);
        break;
    case 0x1b0:
    case 0x1b1: /* cmpxchg Ev, Gv */
        {
            TCGv oldv, newv, cmpv, dest;

            ot = mo_b_d(b, dflag);
            modrm = x86_ldub_code(env, s);
            reg = ((modrm >> 3) & 7) | REX_R(s);
            mod = (modrm >> 6) & 3;
            oldv = tcg_temp_new();
            newv = tcg_temp_new();
            cmpv = tcg_temp_new();
            gen_op_mov_v_reg(s, ot, newv, reg);
            tcg_gen_mov_tl(cmpv, cpu_regs[R_EAX]);
            gen_extu(ot, cmpv);
            if (s->prefix & PREFIX_LOCK) {
                if (mod == 3) {
                    goto illegal_op;
                }
                gen_lea_modrm(env, s, modrm);
                tcg_gen_atomic_cmpxchg_tl(oldv, s->A0, cmpv, newv,
                                          s->mem_index, ot | MO_LE);
            } else {
                if (mod == 3) {
                    rm = (modrm & 7) | REX_B(s);
                    gen_op_mov_v_reg(s, ot, oldv, rm);
                    gen_extu(ot, oldv);

                    /*
                     * Unlike the memory case, where "the destination operand receives
                     * a write cycle without regard to the result of the comparison",
                     * rm must not be touched altogether if the write fails, including
                     * not zero-extending it on 64-bit processors.  So, precompute
                     * the result of a successful writeback and perform the movcond
                     * directly on cpu_regs.  Also need to write accumulator first, in
                     * case rm is part of RAX too.
                     */
                    dest = gen_op_deposit_reg_v(s, ot, rm, newv, newv);
                    tcg_gen_movcond_tl(TCG_COND_EQ, dest, oldv, cmpv, newv, dest);
                } else {
                    gen_lea_modrm(env, s, modrm);
                    gen_op_ld_v(s, ot, oldv, s->A0);

                    /*
                     * Perform an unconditional store cycle like physical cpu;
                     * must be before changing accumulator to ensure
                     * idempotency if the store faults and the instruction
                     * is restarted
                     */
                    tcg_gen_movcond_tl(TCG_COND_EQ, newv, oldv, cmpv, newv, oldv);
                    gen_op_st_v(s, ot, newv, s->A0);
                }
            }
            /*
             * Write EAX only if the cmpxchg fails; reuse newv as the destination,
             * since it's dead here.
             */
            dest = gen_op_deposit_reg_v(s, ot, R_EAX, newv, oldv);
            tcg_gen_movcond_tl(TCG_COND_EQ, dest, oldv, cmpv, dest, newv);
            tcg_gen_mov_tl(cpu_cc_src, oldv);
            tcg_gen_mov_tl(s->cc_srcT, cmpv);
            tcg_gen_sub_tl(cpu_cc_dst, cmpv, oldv);
            set_cc_op(s, CC_OP_SUBB + ot);
        }
        break;
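The semantics this legacy block implemented (and the new decoder entry keeps): compare the accumulator with the destination; if equal, store the source into the destination, otherwise load the destination into the accumulator, and the memory form always performs its store cycle. In plain C, roughly:

#include <stdint.h>
#include <stdio.h>

static void cmpxchg32(uint32_t *eax, uint32_t *dest, uint32_t src, int *zf)
{
    uint32_t old = *dest;

    *zf = (*eax == old);
    if (*zf) {
        *dest = src;       /* success: write the new value */
    } else {
        *eax = old;        /* failure: accumulator observes old value */
    }
}

int main(void)
{
    uint32_t eax = 1, mem = 1;
    int zf;

    cmpxchg32(&eax, &mem, 42, &zf);
    printf("mem=%u eax=%u zf=%d\n", mem, eax, zf);  /* mem=42 eax=1 zf=1 */
    return 0;
}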
case 0x1c7: /* cmpxchg8b */
|
||||
modrm = x86_ldub_code(env, s);
|
||||
mod = (modrm >> 6) & 3;
|
||||
@ -3245,45 +3053,6 @@ static void disas_insn_old(DisasContext *s, CPUState *cpu, int b)
|
||||
}
|
||||
break;
|
||||
|
||||
/**************************/
|
||||
/* shifts */
|
||||
case 0x1a4: /* shld imm */
|
||||
op = 0;
|
||||
shift = 1;
|
||||
goto do_shiftd;
|
||||
case 0x1a5: /* shld cl */
|
||||
op = 0;
|
||||
shift = 0;
|
||||
goto do_shiftd;
|
||||
case 0x1ac: /* shrd imm */
|
||||
op = 1;
|
||||
shift = 1;
|
||||
goto do_shiftd;
|
||||
case 0x1ad: /* shrd cl */
|
||||
op = 1;
|
||||
shift = 0;
|
||||
do_shiftd:
|
||||
        ot = dflag;
        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
        rm = (modrm & 7) | REX_B(s);
        reg = ((modrm >> 3) & 7) | REX_R(s);
        if (mod != 3) {
            gen_lea_modrm(env, s, modrm);
            opreg = OR_TMP0;
        } else {
            opreg = rm;
        }
        gen_op_mov_v_reg(s, ot, s->T1, reg);

        if (shift) {
            TCGv imm = tcg_constant_tl(x86_ldub_code(env, s));
            gen_shiftd_rm_T1(s, ot, opreg, op, imm);
        } else {
            gen_shiftd_rm_T1(s, ot, opreg, op, cpu_regs[R_ECX]);
        }
        break;

        /************************/
        /* bit operations */
    case 0x1ba: /* bt/bts/btr/btc Gv, im */
@@ -3423,147 +3192,6 @@ static void disas_insn_old(DisasContext *s, CPUState *cpu, int b)
            break;
        }
        break;
    case 0x1bc: /* bsf / tzcnt */
    case 0x1bd: /* bsr / lzcnt */
        ot = dflag;
        modrm = x86_ldub_code(env, s);
        reg = ((modrm >> 3) & 7) | REX_R(s);
        gen_ld_modrm(env, s, modrm, ot);
        gen_extu(ot, s->T0);

        /* Note that lzcnt and tzcnt are in different extensions. */
        if ((prefixes & PREFIX_REPZ)
            && (b & 1
                ? s->cpuid_ext3_features & CPUID_EXT3_ABM
                : s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)) {
            int size = 8 << ot;
            /* For lzcnt/tzcnt, C bit is defined related to the input. */
            tcg_gen_mov_tl(cpu_cc_src, s->T0);
            if (b & 1) {
                /* For lzcnt, reduce the target_ulong result by the
                   number of zeros that we expect to find at the top. */
                tcg_gen_clzi_tl(s->T0, s->T0, TARGET_LONG_BITS);
                tcg_gen_subi_tl(s->T0, s->T0, TARGET_LONG_BITS - size);
            } else {
                /* For tzcnt, a zero input must return the operand size. */
                tcg_gen_ctzi_tl(s->T0, s->T0, size);
            }
            /* For lzcnt/tzcnt, Z bit is defined related to the result. */
            gen_op_update1_cc(s);
            set_cc_op(s, CC_OP_BMILGB + ot);
        } else {
            /* For bsr/bsf, only the Z bit is defined and it is related
               to the input and not the result. */
            tcg_gen_mov_tl(cpu_cc_dst, s->T0);
            set_cc_op(s, CC_OP_LOGICB + ot);

            /* ??? The manual says that the output is undefined when the
               input is zero, but real hardware leaves it unchanged, and
               real programs appear to depend on that.  Accomplish this
               by passing the output as the value to return upon zero. */
            if (b & 1) {
                /* For bsr, return the bit index of the first 1 bit,
                   not the count of leading zeros. */
                tcg_gen_xori_tl(s->T1, cpu_regs[reg], TARGET_LONG_BITS - 1);
                tcg_gen_clz_tl(s->T0, s->T0, s->T1);
                tcg_gen_xori_tl(s->T0, s->T0, TARGET_LONG_BITS - 1);
            } else {
                tcg_gen_ctz_tl(s->T0, s->T0, cpu_regs[reg]);
            }
        }
        gen_op_mov_reg_v(s, ot, reg, s->T0);
        break;
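    /* Bit 1 of the opcode selects the direction: 0F 30 is WRMSR, 0F 32
       is RDMSR, hence the "b & 2" test below. */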
    case 0x130: /* wrmsr */
    case 0x132: /* rdmsr */
        if (check_cpl0(s)) {
            gen_update_cc_op(s);
            gen_update_eip_cur(s);
            if (b & 2) {
                gen_helper_rdmsr(tcg_env);
            } else {
                gen_helper_wrmsr(tcg_env);
                s->base.is_jmp = DISAS_EOB_NEXT;
            }
        }
        break;
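    /* The TSC read depends on virtual time, so the translator is notified
       before the helper runs (needed for deterministic icount execution). */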
    case 0x131: /* rdtsc */
        gen_update_cc_op(s);
        gen_update_eip_cur(s);
        translator_io_start(&s->base);
        gen_helper_rdtsc(tcg_env);
        break;
    case 0x133: /* rdpmc */
        gen_update_cc_op(s);
        gen_update_eip_cur(s);
        gen_helper_rdpmc(tcg_env);
        s->base.is_jmp = DISAS_NORETURN;
        break;
    case 0x134: /* sysenter */
        /* For AMD SYSENTER is not valid in long mode */
        if (LMA(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1) {
            goto illegal_op;
        }
        if (!PE(s)) {
            gen_exception_gpf(s);
        } else {
            gen_helper_sysenter(tcg_env);
            s->base.is_jmp = DISAS_EOB_ONLY;
        }
        break;
    case 0x135: /* sysexit */
        /* For AMD SYSEXIT is not valid in long mode */
        if (LMA(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1) {
            goto illegal_op;
        }
        if (!PE(s) || CPL(s) != 0) {
            gen_exception_gpf(s);
        } else {
            gen_helper_sysexit(tcg_env, tcg_constant_i32(dflag - 1));
            s->base.is_jmp = DISAS_EOB_ONLY;
        }
        break;
    case 0x105: /* syscall */
        /* For Intel SYSCALL is only valid in long mode */
        if (!LMA(s) && env->cpuid_vendor1 == CPUID_VENDOR_INTEL_1) {
            goto illegal_op;
        }
        gen_update_cc_op(s);
        gen_update_eip_cur(s);
        gen_helper_syscall(tcg_env, cur_insn_len_i32(s));
        /* condition codes are modified only in long mode */
        if (LMA(s)) {
            assume_cc_op(s, CC_OP_EFLAGS);
        }
        /* TF handling for the syscall insn is different.  The TF bit is
           checked after the syscall insn completes.  This allows #DB to not be
           generated after one has entered CPL0 if TF is set in FMASK. */
        s->base.is_jmp = DISAS_EOB_RECHECK_TF;
        break;
    case 0x107: /* sysret */
        /* For Intel SYSRET is only valid in long mode */
        if (!LMA(s) && env->cpuid_vendor1 == CPUID_VENDOR_INTEL_1) {
            goto illegal_op;
        }
        if (!PE(s) || CPL(s) != 0) {
            gen_exception_gpf(s);
        } else {
            gen_helper_sysret(tcg_env, tcg_constant_i32(dflag - 1));
            /* condition codes are modified only in long mode */
            if (LMA(s)) {
                assume_cc_op(s, CC_OP_EFLAGS);
            }
            /* TF handling for the sysret insn is different.  The TF bit is
               checked after the sysret insn completes.  This allows #DB to be
               generated "as if" the syscall insn in userspace has just
               completed. */
            s->base.is_jmp = DISAS_EOB_RECHECK_TF;
        }
        break;
    case 0x1a2: /* cpuid */
        gen_update_cc_op(s);
        gen_update_eip_cur(s);
        gen_helper_cpuid(tcg_env);
        break;
    case 0x100:
        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
@@ -3967,39 +3595,6 @@ static void disas_insn_old(DisasContext *s, CPUState *cpu, int b)
        }
        break;

    case 0x108: /* invd */
    case 0x109: /* wbinvd; wbnoinvd with REPZ prefix */
        if (check_cpl0(s)) {
            gen_svm_check_intercept(s, (b & 1) ? SVM_EXIT_WBINVD : SVM_EXIT_INVD);
            /* nothing to do */
        }
        break;
    case 0x102: /* lar */
    case 0x103: /* lsl */
        {
            TCGLabel *label1;
            TCGv t0;
            if (!PE(s) || VM86(s)) {
                goto illegal_op;
            }
            ot = dflag != MO_16 ? MO_32 : MO_16;
            modrm = x86_ldub_code(env, s);
            reg = ((modrm >> 3) & 7) | REX_R(s);
            gen_ld_modrm(env, s, modrm, MO_16);
            t0 = tcg_temp_new();
            gen_update_cc_op(s);
            if (b == 0x102) {
                gen_helper_lar(t0, tcg_env, s->T0);
            } else {
                gen_helper_lsl(t0, tcg_env, s->T0);
            }
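            /* The helpers set ZF in cc_src on success; the destination
               register is written back only in that case. */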
            tcg_gen_andi_tl(s->tmp0, cpu_cc_src, CC_Z);
            label1 = gen_new_label();
            tcg_gen_brcondi_tl(TCG_COND_EQ, s->tmp0, 0, label1);
            gen_op_mov_reg_v(s, ot, reg, t0);
            gen_set_label(label1);
            set_cc_op(s, CC_OP_EFLAGS);
        }
        break;
    case 0x11a:
        modrm = x86_ldub_code(env, s);
        if (s->flags & HF_MPX_EN_MASK) {
@@ -4191,311 +3786,6 @@ static void disas_insn_old(DisasContext *s, CPUState *cpu, int b)
        }
        gen_nop_modrm(env, s, modrm);
        break;

    case 0x120: /* mov reg, crN */
    case 0x122: /* mov crN, reg */
        if (!check_cpl0(s)) {
            break;
        }
        modrm = x86_ldub_code(env, s);
        /*
         * Ignore the mod bits (assume (modrm&0xc0)==0xc0).
         * AMD documentation (24594.pdf) and testing of Intel 386 and 486
         * processors all show that the mod bits are assumed to be 1's,
         * regardless of actual values.
         */
        rm = (modrm & 7) | REX_B(s);
        reg = ((modrm >> 3) & 7) | REX_R(s);
        switch (reg) {
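        /* With a LOCK prefix, a CR0 access selects CR8 instead: AMD's
           alternate MOV CR8 encoding, gated on the cr8legacy CPUID bit. */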
        case 0:
            if ((prefixes & PREFIX_LOCK) &&
                (s->cpuid_ext3_features & CPUID_EXT3_CR8LEG)) {
                reg = 8;
            }
            break;
        case 2:
        case 3:
        case 4:
        case 8:
            break;
        default:
            goto unknown_op;
        }
        ot = (CODE64(s) ? MO_64 : MO_32);

        translator_io_start(&s->base);
        if (b & 2) {
            gen_svm_check_intercept(s, SVM_EXIT_WRITE_CR0 + reg);
            gen_op_mov_v_reg(s, ot, s->T0, rm);
            gen_helper_write_crN(tcg_env, tcg_constant_i32(reg), s->T0);
            s->base.is_jmp = DISAS_EOB_NEXT;
        } else {
            gen_svm_check_intercept(s, SVM_EXIT_READ_CR0 + reg);
            gen_helper_read_crN(s->T0, tcg_env, tcg_constant_i32(reg));
            gen_op_mov_reg_v(s, ot, rm, s->T0);
        }
        break;

    case 0x121: /* mov reg, drN */
    case 0x123: /* mov drN, reg */
        if (check_cpl0(s)) {
            modrm = x86_ldub_code(env, s);
            /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
             * AMD documentation (24594.pdf) and testing of
             * Intel 386 and 486 processors all show that the mod bits
             * are assumed to be 1's, regardless of actual values.
             */
            rm = (modrm & 7) | REX_B(s);
            reg = ((modrm >> 3) & 7) | REX_R(s);
            if (CODE64(s)) {
                ot = MO_64;
            } else {
                ot = MO_32;
            }
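            /* Only DR0-DR7 exist; an encoding of DR8-DR15 via REX.R
               raises #UD. */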
            if (reg >= 8) {
                goto illegal_op;
            }
            if (b & 2) {
                gen_svm_check_intercept(s, SVM_EXIT_WRITE_DR0 + reg);
                gen_op_mov_v_reg(s, ot, s->T0, rm);
                tcg_gen_movi_i32(s->tmp2_i32, reg);
                gen_helper_set_dr(tcg_env, s->tmp2_i32, s->T0);
                s->base.is_jmp = DISAS_EOB_NEXT;
            } else {
                gen_svm_check_intercept(s, SVM_EXIT_READ_DR0 + reg);
                tcg_gen_movi_i32(s->tmp2_i32, reg);
                gen_helper_get_dr(s->T0, tcg_env, s->tmp2_i32);
                gen_op_mov_reg_v(s, ot, rm, s->T0);
            }
        }
        break;
    case 0x106: /* clts */
        if (check_cpl0(s)) {
            gen_svm_check_intercept(s, SVM_EXIT_WRITE_CR0);
            gen_helper_clts(tcg_env);
            /* abort block because static cpu state changed */
            s->base.is_jmp = DISAS_EOB_NEXT;
        }
        break;
        /* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */
    case 0x1ae:
        modrm = x86_ldub_code(env, s);
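        /* Group 15: decode on the full modrm byte, since the register
           forms (mod == 3) encode unrelated instructions such as the
           fences and the FSGSBASE group. */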
        switch (modrm) {
        CASE_MODRM_MEM_OP(0): /* fxsave */
            if (!(s->cpuid_features & CPUID_FXSR)
                || (prefixes & PREFIX_LOCK)) {
                goto illegal_op;
            }
            if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
                gen_exception(s, EXCP07_PREX);
                break;
            }
            gen_lea_modrm(env, s, modrm);
            gen_helper_fxsave(tcg_env, s->A0);
            break;

        CASE_MODRM_MEM_OP(1): /* fxrstor */
            if (!(s->cpuid_features & CPUID_FXSR)
                || (prefixes & PREFIX_LOCK)) {
                goto illegal_op;
            }
            if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
                gen_exception(s, EXCP07_PREX);
                break;
            }
            gen_lea_modrm(env, s, modrm);
            gen_helper_fxrstor(tcg_env, s->A0);
            break;

        CASE_MODRM_MEM_OP(2): /* ldmxcsr */
            if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK)) {
                goto illegal_op;
            }
            if (s->flags & HF_TS_MASK) {
                gen_exception(s, EXCP07_PREX);
                break;
            }
            gen_lea_modrm(env, s, modrm);
            tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0, s->mem_index, MO_LEUL);
            gen_helper_ldmxcsr(tcg_env, s->tmp2_i32);
            break;

        CASE_MODRM_MEM_OP(3): /* stmxcsr */
            if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK)) {
                goto illegal_op;
            }
            if (s->flags & HF_TS_MASK) {
                gen_exception(s, EXCP07_PREX);
                break;
            }
            gen_helper_update_mxcsr(tcg_env);
            gen_lea_modrm(env, s, modrm);
            tcg_gen_ld32u_tl(s->T0, tcg_env, offsetof(CPUX86State, mxcsr));
            gen_op_st_v(s, MO_32, s->T0, s->A0);
            break;

        CASE_MODRM_MEM_OP(4): /* xsave */
            if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
                || (prefixes & (PREFIX_LOCK | PREFIX_DATA
                                | PREFIX_REPZ | PREFIX_REPNZ))) {
                goto illegal_op;
            }
            gen_lea_modrm(env, s, modrm);
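            /* The requested-feature bitmap is passed in EDX:EAX. */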
            tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
                                  cpu_regs[R_EDX]);
            gen_helper_xsave(tcg_env, s->A0, s->tmp1_i64);
            break;

        CASE_MODRM_MEM_OP(5): /* xrstor */
            if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
                || (prefixes & (PREFIX_LOCK | PREFIX_DATA
                                | PREFIX_REPZ | PREFIX_REPNZ))) {
                goto illegal_op;
            }
            gen_lea_modrm(env, s, modrm);
            tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
                                  cpu_regs[R_EDX]);
            gen_helper_xrstor(tcg_env, s->A0, s->tmp1_i64);
            /* XRSTOR is how MPX is enabled, which changes how
               we translate.  Thus we need to end the TB. */
            s->base.is_jmp = DISAS_EOB_NEXT;
            break;

        CASE_MODRM_MEM_OP(6): /* xsaveopt / clwb */
            if (prefixes & PREFIX_LOCK) {
                goto illegal_op;
            }
            if (prefixes & PREFIX_DATA) {
                /* clwb */
                if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLWB)) {
                    goto illegal_op;
                }
                gen_nop_modrm(env, s, modrm);
            } else {
                /* xsaveopt */
                if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
                    || (s->cpuid_xsave_features & CPUID_XSAVE_XSAVEOPT) == 0
                    || (prefixes & (PREFIX_REPZ | PREFIX_REPNZ))) {
                    goto illegal_op;
                }
                gen_lea_modrm(env, s, modrm);
                tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
                                      cpu_regs[R_EDX]);
                gen_helper_xsaveopt(tcg_env, s->A0, s->tmp1_i64);
            }
            break;

        CASE_MODRM_MEM_OP(7): /* clflush / clflushopt */
            if (prefixes & PREFIX_LOCK) {
                goto illegal_op;
            }
            if (prefixes & PREFIX_DATA) {
                /* clflushopt */
                if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLFLUSHOPT)) {
                    goto illegal_op;
                }
            } else {
                /* clflush */
                if ((s->prefix & (PREFIX_REPZ | PREFIX_REPNZ))
                    || !(s->cpuid_features & CPUID_CLFLUSH)) {
                    goto illegal_op;
                }
            }
            gen_nop_modrm(env, s, modrm);
            break;

        case 0xc0 ... 0xc7: /* rdfsbase (f3 0f ae /0) */
        case 0xc8 ... 0xcf: /* rdgsbase (f3 0f ae /1) */
        case 0xd0 ... 0xd7: /* wrfsbase (f3 0f ae /2) */
        case 0xd8 ... 0xdf: /* wrgsbase (f3 0f ae /3) */
            if (CODE64(s)
                && (prefixes & PREFIX_REPZ)
                && !(prefixes & PREFIX_LOCK)
                && (s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_FSGSBASE)) {
                TCGv base, treg, src, dst;

                /* Preserve hflags bits by testing CR4 at runtime. */
                tcg_gen_movi_i32(s->tmp2_i32, CR4_FSGSBASE_MASK);
                gen_helper_cr4_testbit(tcg_env, s->tmp2_i32);

                base = cpu_seg_base[modrm & 8 ? R_GS : R_FS];
                treg = cpu_regs[(modrm & 7) | REX_B(s)];

                if (modrm & 0x10) {
                    /* wr*base */
                    dst = base, src = treg;
                } else {
                    /* rd*base */
                    dst = treg, src = base;
                }

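                /* As with any write to a 64-bit GPR, a 32-bit operand
                   size zero-extends the result. */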
                if (s->dflag == MO_32) {
                    tcg_gen_ext32u_tl(dst, src);
                } else {
                    tcg_gen_mov_tl(dst, src);
                }
                break;
            }
            goto unknown_op;

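        /* The SSE fences map onto TCG barriers of matching strength:
           SFENCE orders stores against stores, LFENCE loads against
           loads, MFENCE everything. */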
        case 0xf8 ... 0xff: /* sfence */
            if (!(s->cpuid_features & CPUID_SSE)
                || (prefixes & PREFIX_LOCK)) {
                goto illegal_op;
            }
            tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
            break;
        case 0xe8 ... 0xef: /* lfence */
            if (!(s->cpuid_features & CPUID_SSE)
                || (prefixes & PREFIX_LOCK)) {
                goto illegal_op;
            }
            tcg_gen_mb(TCG_MO_LD_LD | TCG_BAR_SC);
            break;
        case 0xf0 ... 0xf7: /* mfence */
            if (!(s->cpuid_features & CPUID_SSE2)
                || (prefixes & PREFIX_LOCK)) {
                goto illegal_op;
            }
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
            break;

        default:
            goto unknown_op;
        }
        break;

    case 0x1aa: /* rsm */
        gen_svm_check_intercept(s, SVM_EXIT_RSM);
        if (!(s->flags & HF_SMM_MASK)) {
            goto illegal_op;
        }
#ifdef CONFIG_USER_ONLY
        /* we should not be in SMM mode */
        g_assert_not_reached();
#else
        gen_helper_rsm(tcg_env);
        assume_cc_op(s, CC_OP_EFLAGS);
#endif /* CONFIG_USER_ONLY */
        s->base.is_jmp = DISAS_EOB_ONLY;
        break;
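    /* POPCNT requires the F3 prefix and no other mandatory prefix; ZF is
       derived from the zero-extended input, which is saved in cc_src. */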
    case 0x1b8: /* SSE4.2 popcnt */
        if ((prefixes & (PREFIX_REPZ | PREFIX_LOCK | PREFIX_REPNZ)) !=
            PREFIX_REPZ) {
            goto illegal_op;
        }
        if (!(s->cpuid_ext_features & CPUID_EXT_POPCNT)) {
            goto illegal_op;
        }

        modrm = x86_ldub_code(env, s);
        reg = ((modrm >> 3) & 7) | REX_R(s);

        ot = dflag;
        gen_ld_modrm(env, s, modrm, ot);
        gen_extu(ot, s->T0);
        tcg_gen_mov_tl(cpu_cc_src, s->T0);
        tcg_gen_ctpop_tl(s->T0, s->T0);
        gen_op_mov_reg_v(s, ot, reg, s->T0);

        set_cc_op(s, CC_OP_POPCNT);
        break;
    default:
        g_assert_not_reached();
    }