2003-10-01 00:34:21 +04:00
|
|
|
/*
|
|
|
|
* i386 translation
|
2007-09-17 01:08:06 +04:00
|
|
|
*
|
2003-10-01 00:34:21 +04:00
|
|
|
* Copyright (c) 2003 Fabrice Bellard
|
|
|
|
*
|
|
|
|
* This library is free software; you can redistribute it and/or
|
|
|
|
* modify it under the terms of the GNU Lesser General Public
|
|
|
|
* License as published by the Free Software Foundation; either
|
2020-10-23 15:28:01 +03:00
|
|
|
* version 2.1 of the License, or (at your option) any later version.
|
2003-10-01 00:34:21 +04:00
|
|
|
*
|
|
|
|
* This library is distributed in the hope that it will be useful,
|
|
|
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
|
|
|
* Lesser General Public License for more details.
|
|
|
|
*
|
|
|
|
* You should have received a copy of the GNU Lesser General Public
|
2009-07-17 00:47:01 +04:00
|
|
|
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
|
2003-10-01 00:34:21 +04:00
|
|
|
*/
|
2016-01-26 21:17:03 +03:00
|
|
|
#include "qemu/osdep.h"
|
2003-10-01 00:34:21 +04:00
|
|
|
|
2013-01-24 02:21:52 +04:00
|
|
|
#include "qemu/host-utils.h"
|
2003-10-01 00:34:21 +04:00
|
|
|
#include "cpu.h"
|
2016-03-15 15:18:37 +03:00
|
|
|
#include "exec/exec-all.h"
|
2020-01-01 14:23:00 +03:00
|
|
|
#include "tcg/tcg-op.h"
|
2022-09-18 01:43:52 +03:00
|
|
|
#include "tcg/tcg-op-gvec.h"
|
2017-07-14 11:21:37 +03:00
|
|
|
#include "exec/translator.h"
|
2022-10-19 14:22:06 +03:00
|
|
|
#include "fpu/softfloat.h"
|
2003-10-01 00:34:21 +04:00
|
|
|
|
2014-04-08 09:31:41 +04:00
|
|
|
#include "exec/helper-proto.h"
|
|
|
|
#include "exec/helper-gen.h"
|
2020-12-12 18:55:14 +03:00
|
|
|
#include "helper-tcg.h"
|
2008-11-17 17:43:54 +03:00
|
|
|
|
2016-01-07 16:55:28 +03:00
|
|
|
#include "exec/log.h"
|
2014-05-30 16:12:25 +04:00
|
|
|
|
2023-03-31 20:37:04 +03:00
|
|
|
#define HELPER_H "helper.h"
|
|
|
|
#include "exec/helper-info.c.inc"
|
|
|
|
#undef HELPER_H
|
|
|
|
|
target/i386: move C0-FF opcodes to new decoder (except for x87)
The shift instructions are rewritten instead of reusing code from the old
decoder. Rotates use CC_OP_ADCOX more extensively and generally rely
more on the optimizer, so that the code generators are shared between
the immediate-count and variable-count cases.
In particular, this makes gen_RCL and gen_RCR pretty efficient for the
count == 1 case, which becomes (apart from a few extra movs) something like:
(compute_cc_all if needed)
// save old value for OF calculation
mov cc_src2, T0
// the bulk of RCL is just this!
deposit T0, cc_src, T0, 1, TARGET_LONG_BITS - 1
// compute carry
shr cc_dst, cc_src2, length - 1
and cc_dst, cc_dst, 1
// compute overflow
xor cc_src2, cc_src2, T0
extract cc_src2, cc_src2, length - 1, 1
32-bit MUL and IMUL are also slightly more efficient on 64-bit hosts.
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
2023-10-21 18:36:34 +03:00
|
|
|
/* Fixes for Windows namespace pollution. */
|
|
|
|
#undef IN
|
|
|
|
#undef OUT
|
2023-03-31 20:37:04 +03:00
|
|
|
|
2003-10-01 00:34:21 +04:00
|
|
|
#define PREFIX_REPZ 0x01
|
|
|
|
#define PREFIX_REPNZ 0x02
|
|
|
|
#define PREFIX_LOCK 0x04
|
|
|
|
#define PREFIX_DATA 0x08
|
|
|
|
#define PREFIX_ADR 0x10
|
2013-01-11 23:35:02 +04:00
|
|
|
#define PREFIX_VEX 0x20
|
2021-05-14 18:13:07 +03:00
|
|
|
#define PREFIX_REX 0x40
|
2003-10-01 00:34:21 +04:00
|
|
|
|
2013-01-24 02:21:52 +04:00
|
|
|
#ifdef TARGET_X86_64
|
|
|
|
# define ctztl ctz64
|
|
|
|
# define clztl clz64
|
|
|
|
#else
|
|
|
|
# define ctztl ctz32
|
|
|
|
# define clztl clz32
|
|
|
|
#endif
|
|
|
|
|
2015-07-02 15:59:21 +03:00
|
|
|
/* For a switch indexed by MODRM, match all memory operands for a given OP. */
|
2016-03-01 18:12:14 +03:00
|
|
|
#define CASE_MODRM_MEM_OP(OP) \
|
2015-07-02 15:59:21 +03:00
|
|
|
case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
|
|
|
|
case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
|
|
|
|
case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7
|
|
|
|
|
2016-03-01 18:12:14 +03:00
|
|
|
#define CASE_MODRM_OP(OP) \
|
|
|
|
case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
|
|
|
|
case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
|
|
|
|
case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7: \
|
|
|
|
case (3 << 6) | (OP << 3) | 0 ... (3 << 6) | (OP << 3) | 7
|
|
|
|
|
2008-02-01 13:50:11 +03:00
|
|
|
//#define MACRO_TEST 1
|
|
|
|
|
|
|
|
/* global register indexes */
|
2018-09-11 21:38:47 +03:00
|
|
|
static TCGv cpu_cc_dst, cpu_cc_src, cpu_cc_src2;
|
2022-10-01 17:09:33 +03:00
|
|
|
static TCGv cpu_eip;
|
2008-11-17 17:43:54 +03:00
|
|
|
static TCGv_i32 cpu_cc_op;
|
2009-09-29 13:58:04 +04:00
|
|
|
static TCGv cpu_regs[CPU_NB_REGS];
|
2015-12-17 22:19:21 +03:00
|
|
|
static TCGv cpu_seg_base[6];
|
2015-07-09 10:22:46 +03:00
|
|
|
static TCGv_i64 cpu_bndl[4];
|
|
|
|
static TCGv_i64 cpu_bndu[4];
|
2018-09-11 21:07:57 +03:00
|
|
|
|
2003-10-01 00:34:21 +04:00
|
|
|
/*
 * Per-translation-block decoder/codegen state for the x86 frontend.
 * One DisasContext lives for the duration of translating a single TB;
 * it carries decoded prefix/ModRM state, mode flags, the tracked CC
 * operation, and the shared TCG temporaries used by the generators.
 */
typedef struct DisasContext {
    DisasContextBase base;       /* common translator bookkeeping */

    target_ulong pc;       /* pc = eip + cs_base */
    target_ulong cs_base;  /* base of CS segment */
    target_ulong pc_save;  /* NOTE(review): looks like the PC value last
                              mirrored into cpu_eip — confirm against the
                              EIP-update helpers */

    MemOp aflag;           /* effective address size for this insn */
    MemOp dflag;           /* effective operand size for this insn */

    int8_t override;       /* -1 if no override, else R_CS, R_DS, etc */
    uint8_t prefix;        /* accumulated PREFIX_* bits */

    bool has_modrm;        /* modrm byte already fetched for this insn */
    uint8_t modrm;         /* valid only when has_modrm is true */

#ifndef CONFIG_USER_ONLY
    uint8_t cpl;   /* code priv level */
    uint8_t iopl;  /* i/o priv level */
#endif
    uint8_t vex_l;  /* vex vector length */
    uint8_t vex_v;  /* vex vvvv register, without 1's complement. */
    uint8_t popl_esp_hack; /* for correct popl with esp base handling */
    uint8_t rip_offset; /* only used in x86_64, but left for simplicity */

#ifdef TARGET_X86_64
    /* REX.R/X/B bits, pre-shifted so they can be ORed into reg numbers */
    uint8_t rex_r;
    uint8_t rex_x;
    uint8_t rex_b;
#endif
    bool vex_w; /* used by AVX even on 32-bit processors */
    bool jmp_opt; /* use direct block chaining for direct jumps */
    bool repz_opt; /* optimize jumps within repz instructions */
    bool cc_op_dirty; /* true if cc_op differs from the value in CPUState */

    CCOp cc_op;  /* current CC operation */
    int mem_index; /* select memory access functions */
    uint32_t flags; /* all execution flags */
    /* Cached CPUID feature words, used to gate instruction decoding. */
    int cpuid_features;
    int cpuid_ext_features;
    int cpuid_ext2_features;
    int cpuid_ext3_features;
    int cpuid_7_0_ebx_features;
    int cpuid_7_0_ecx_features;
    int cpuid_7_1_eax_features;
    int cpuid_xsave_features;

    /* TCG local temps */
    TCGv cc_srcT;  /* saved LHS of a SUB/CMP, for flag reconstruction */
    TCGv A0;       /* computed effective address */
    TCGv T0;
    TCGv T1;

    /* TCG local register indexes (only used inside old micro ops) */
    TCGv tmp0;
    TCGv tmp4;
    TCGv_i32 tmp2_i32;
    TCGv_i32 tmp3_i32;
    TCGv_i64 tmp1_i64;

    /* Bail-out target while decoding (e.g. instruction longer than
       15 bytes, or a page fault mid-fetch). */
    sigjmp_buf jmpbuf;
    /* TCG op markers delimiting the previously translated instruction. */
    TCGOp *prev_insn_start;
    TCGOp *prev_insn_end;
} DisasContext;
|
|
|
|
|
2024-05-16 20:04:36 +03:00
|
|
|
/*
|
|
|
|
* Point EIP to next instruction before ending translation.
|
|
|
|
* For instructions that can change hflags.
|
|
|
|
*/
|
|
|
|
#define DISAS_EOB_NEXT DISAS_TARGET_0
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Point EIP to next instruction and set HF_INHIBIT_IRQ if not
|
|
|
|
* already set. For instructions that activate interrupt shadow.
|
|
|
|
*/
|
|
|
|
#define DISAS_EOB_INHIBIT_IRQ DISAS_TARGET_1
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Return to the main loop; EIP might have already been updated
|
|
|
|
* but even in that case do not use lookup_and_goto_ptr().
|
|
|
|
*/
|
|
|
|
#define DISAS_EOB_ONLY DISAS_TARGET_2
|
|
|
|
|
|
|
|
/*
|
|
|
|
* EIP has already been updated. For jumps that wish to use
|
|
|
|
* lookup_and_goto_ptr()
|
|
|
|
*/
|
2022-10-01 17:09:22 +03:00
|
|
|
#define DISAS_JUMP DISAS_TARGET_3
|
2022-10-01 17:09:16 +03:00
|
|
|
|
2024-05-16 19:46:55 +03:00
|
|
|
/*
|
|
|
|
* EIP has already been updated. Use updated value of
|
|
|
|
* EFLAGS.TF to determine singlestep trap (SYSCALL/SYSRET).
|
|
|
|
*/
|
|
|
|
#define DISAS_EOB_RECHECK_TF DISAS_TARGET_4
|
|
|
|
|
2021-05-14 18:12:58 +03:00
|
|
|
/* The environment in which user-only runs is constrained. */
|
|
|
|
#ifdef CONFIG_USER_ONLY
|
|
|
|
#define PE(S) true
|
2021-05-14 18:12:59 +03:00
|
|
|
#define CPL(S) 3
|
2021-05-14 18:13:00 +03:00
|
|
|
#define IOPL(S) 0
|
2021-05-14 18:13:22 +03:00
|
|
|
#define SVME(S) false
|
2021-05-14 18:13:23 +03:00
|
|
|
#define GUEST(S) false
|
2021-05-14 18:12:58 +03:00
|
|
|
#else
|
|
|
|
#define PE(S) (((S)->flags & HF_PE_MASK) != 0)
|
2021-05-14 18:12:59 +03:00
|
|
|
#define CPL(S) ((S)->cpl)
|
2021-05-14 18:13:00 +03:00
|
|
|
#define IOPL(S) ((S)->iopl)
|
2021-05-14 18:13:22 +03:00
|
|
|
#define SVME(S) (((S)->flags & HF_SVME_MASK) != 0)
|
2021-05-14 18:13:23 +03:00
|
|
|
#define GUEST(S) (((S)->flags & HF_GUEST_MASK) != 0)
|
2021-05-14 18:12:58 +03:00
|
|
|
#endif
|
2021-05-14 18:13:01 +03:00
|
|
|
#if defined(CONFIG_USER_ONLY) && defined(TARGET_X86_64)
|
|
|
|
#define VM86(S) false
|
2021-05-14 18:13:02 +03:00
|
|
|
#define CODE32(S) true
|
2021-05-14 18:13:03 +03:00
|
|
|
#define SS32(S) true
|
2021-05-14 18:13:06 +03:00
|
|
|
#define ADDSEG(S) false
|
2021-05-14 18:13:01 +03:00
|
|
|
#else
|
|
|
|
#define VM86(S) (((S)->flags & HF_VM_MASK) != 0)
|
2021-05-14 18:13:02 +03:00
|
|
|
#define CODE32(S) (((S)->flags & HF_CS32_MASK) != 0)
|
2021-05-14 18:13:03 +03:00
|
|
|
#define SS32(S) (((S)->flags & HF_SS32_MASK) != 0)
|
2021-05-14 18:13:06 +03:00
|
|
|
#define ADDSEG(S) (((S)->flags & HF_ADDSEG_MASK) != 0)
|
2021-05-14 18:13:01 +03:00
|
|
|
#endif
|
2021-05-14 18:13:04 +03:00
|
|
|
#if !defined(TARGET_X86_64)
|
|
|
|
#define CODE64(S) false
|
|
|
|
#elif defined(CONFIG_USER_ONLY)
|
|
|
|
#define CODE64(S) true
|
|
|
|
#else
|
|
|
|
#define CODE64(S) (((S)->flags & HF_CS64_MASK) != 0)
|
2023-06-20 17:49:35 +03:00
|
|
|
#endif
|
2023-10-04 11:22:39 +03:00
|
|
|
#if defined(CONFIG_USER_ONLY) || defined(TARGET_X86_64)
|
2021-05-14 18:13:05 +03:00
|
|
|
#define LMA(S) (((S)->flags & HF_LMA_MASK) != 0)
|
2023-10-04 11:22:39 +03:00
|
|
|
#else
|
|
|
|
#define LMA(S) false
|
2021-05-14 18:13:04 +03:00
|
|
|
#endif
|
2021-05-14 18:12:58 +03:00
|
|
|
|
2021-05-14 18:13:07 +03:00
|
|
|
#ifdef TARGET_X86_64
|
|
|
|
#define REX_PREFIX(S) (((S)->prefix & PREFIX_REX) != 0)
|
2022-10-18 14:27:44 +03:00
|
|
|
#define REX_W(S) ((S)->vex_w)
|
2021-05-14 18:13:09 +03:00
|
|
|
#define REX_R(S) ((S)->rex_r + 0)
|
2021-05-14 18:13:08 +03:00
|
|
|
#define REX_X(S) ((S)->rex_x + 0)
|
|
|
|
#define REX_B(S) ((S)->rex_b + 0)
|
2021-05-14 18:13:07 +03:00
|
|
|
#else
|
|
|
|
#define REX_PREFIX(S) false
|
2021-05-14 18:13:10 +03:00
|
|
|
#define REX_W(S) false
|
2021-05-14 18:13:09 +03:00
|
|
|
#define REX_R(S) 0
|
2021-05-14 18:13:08 +03:00
|
|
|
#define REX_X(S) 0
|
|
|
|
#define REX_B(S) 0
|
2021-05-14 18:13:07 +03:00
|
|
|
#endif
|
|
|
|
|
2021-05-14 18:13:21 +03:00
|
|
|
/*
|
|
|
|
* Many sysemu-only helpers are not reachable for user-only.
|
|
|
|
* Define stub generators here, so that we need not either sprinkle
|
|
|
|
* ifdefs through the translator, nor provide the helper function.
|
|
|
|
*/
|
|
|
|
#define STUB_HELPER(NAME, ...) \
|
|
|
|
static inline void gen_helper_##NAME(__VA_ARGS__) \
|
|
|
|
{ qemu_build_not_reached(); }
|
|
|
|
|
|
|
|
#ifdef CONFIG_USER_ONLY
|
2021-05-14 18:13:25 +03:00
|
|
|
STUB_HELPER(clgi, TCGv_env env)
|
2021-05-14 18:13:34 +03:00
|
|
|
STUB_HELPER(flush_page, TCGv_env env, TCGv addr)
|
2021-05-14 18:13:42 +03:00
|
|
|
STUB_HELPER(inb, TCGv ret, TCGv_env env, TCGv_i32 port)
|
|
|
|
STUB_HELPER(inw, TCGv ret, TCGv_env env, TCGv_i32 port)
|
|
|
|
STUB_HELPER(inl, TCGv ret, TCGv_env env, TCGv_i32 port)
|
2021-05-14 18:13:33 +03:00
|
|
|
STUB_HELPER(monitor, TCGv_env env, TCGv addr)
|
|
|
|
STUB_HELPER(mwait, TCGv_env env, TCGv_i32 pc_ofs)
|
2021-05-14 18:13:42 +03:00
|
|
|
STUB_HELPER(outb, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
|
|
|
|
STUB_HELPER(outw, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
|
|
|
|
STUB_HELPER(outl, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
|
2021-05-14 18:13:36 +03:00
|
|
|
STUB_HELPER(rdmsr, TCGv_env env)
|
|
|
|
STUB_HELPER(read_crN, TCGv ret, TCGv_env env, TCGv_i32 reg)
|
2021-07-06 18:53:29 +03:00
|
|
|
STUB_HELPER(get_dr, TCGv ret, TCGv_env env, TCGv_i32 reg)
|
2021-05-14 18:13:21 +03:00
|
|
|
STUB_HELPER(set_dr, TCGv_env env, TCGv_i32 reg, TCGv val)
|
2021-05-14 18:13:25 +03:00
|
|
|
STUB_HELPER(stgi, TCGv_env env)
|
2021-05-14 18:13:28 +03:00
|
|
|
STUB_HELPER(svm_check_intercept, TCGv_env env, TCGv_i32 type)
|
2021-05-14 18:13:25 +03:00
|
|
|
STUB_HELPER(vmload, TCGv_env env, TCGv_i32 aflag)
|
|
|
|
STUB_HELPER(vmmcall, TCGv_env env)
|
|
|
|
STUB_HELPER(vmrun, TCGv_env env, TCGv_i32 aflag, TCGv_i32 pc_ofs)
|
|
|
|
STUB_HELPER(vmsave, TCGv_env env, TCGv_i32 aflag)
|
2021-05-14 18:13:36 +03:00
|
|
|
STUB_HELPER(write_crN, TCGv_env env, TCGv_i32 reg, TCGv val)
|
|
|
|
STUB_HELPER(wrmsr, TCGv_env env)
|
2021-05-14 18:13:21 +03:00
|
|
|
#endif
|
|
|
|
|
2003-10-01 00:34:21 +04:00
|
|
|
static void gen_eob(DisasContext *s);
|
2022-10-01 17:09:22 +03:00
|
|
|
static void gen_jr(DisasContext *s);
|
2022-10-01 17:09:26 +03:00
|
|
|
static void gen_jmp_rel(DisasContext *s, MemOp ot, int diff, int tb_num);
|
2022-10-01 17:09:27 +03:00
|
|
|
static void gen_jmp_rel_csize(DisasContext *s, int diff, int tb_num);
|
2021-05-14 18:13:41 +03:00
|
|
|
static void gen_exception_gpf(DisasContext *s);
|
2003-10-01 00:34:21 +04:00
|
|
|
|
|
|
|
/* i386 shift ops */
|
|
|
|
enum {
    OP_ROL,           /* rotate left */
    OP_ROR,           /* rotate right */
    OP_RCL,           /* rotate left through carry */
    OP_RCR,           /* rotate right through carry */
    OP_SHL,           /* shift left */
    OP_SHR,           /* logical shift right */
    OP_SHL1, /* undocumented */
    OP_SAR = 7,       /* arithmetic shift right */
};
|
|
|
|
|
2008-05-21 23:16:45 +04:00
|
|
|
/* Condition kinds for Jcc/SETcc/CMOVcc code generation. */
enum {
    JCC_O,    /* overflow */
    JCC_B,    /* below (carry set) */
    JCC_Z,    /* zero / equal */
    JCC_BE,   /* below or equal (unsigned) */
    JCC_S,    /* sign */
    JCC_P,    /* parity */
    JCC_L,    /* less (signed) */
    JCC_LE,   /* less or equal (signed) */
};
|
|
|
|
|
2003-10-01 00:34:21 +04:00
|
|
|
/* Operand register indexes used by the translator. */
enum {
    /* I386 int registers */
    OR_EAX, /* MUST be even numbered */
    OR_ECX,
    OR_EDX,
    OR_EBX,
    OR_ESP,
    OR_EBP,
    OR_ESI,
    OR_EDI,

    OR_TMP0 = 16, /* temporary operand register */
    OR_TMP1,
    OR_A0, /* temporary register used when doing address evaluation */
};
|
|
|
|
|
2013-01-24 01:26:38 +04:00
|
|
|
/*
 * Liveness bits for the flag-computation globals (cc_dst, cc_src,
 * cc_src2, cc_srcT).  Used as values in cc_op_live[].
 */
enum {
    USES_CC_DST = 1,
    USES_CC_SRC = 2,
    USES_CC_SRC2 = 4,
    USES_CC_SRCT = 8,
};
|
|
|
|
|
|
|
|
/* Bit set if the global variable is live after setting CC_OP to X. */
static const uint8_t cc_op_live[CC_OP_NB] = {
    [CC_OP_DYNAMIC] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_EFLAGS] = USES_CC_SRC,
    [CC_OP_MULB ... CC_OP_MULQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADDB ... CC_OP_ADDQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADCB ... CC_OP_ADCQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    /* SUB/CMP additionally keep the original LHS in cc_srcT. */
    [CC_OP_SUBB ... CC_OP_SUBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRCT,
    [CC_OP_SBBB ... CC_OP_SBBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_LOGICB ... CC_OP_LOGICQ] = USES_CC_DST,
    [CC_OP_INCB ... CC_OP_INCQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_DECB ... CC_OP_DECQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_SHLB ... CC_OP_SHLQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_SARB ... CC_OP_SARQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_BMILGB ... CC_OP_BMILGQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADCX] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADOX] = USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_ADCOX] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_CLR] = 0,
    [CC_OP_POPCNT] = USES_CC_SRC,
};
|
|
|
|
|
2024-05-17 00:08:40 +03:00
|
|
|
/*
 * Record OP as the tracked CC operation, discarding any CC globals
 * whose contents are dead under the new operation.  DIRTY indicates
 * whether cpu_cc_op in the CPU state must still be written back
 * (see gen_update_cc_op).
 */
static void set_cc_op_1(DisasContext *s, CCOp op, bool dirty)
{
    if (op == s->cc_op) {
        return;
    }

    /* Discard CC computation that will no longer be used. */
    int obsolete = cc_op_live[s->cc_op] & ~cc_op_live[op];
    if (obsolete & USES_CC_DST) {
        tcg_gen_discard_tl(cpu_cc_dst);
    }
    if (obsolete & USES_CC_SRC) {
        tcg_gen_discard_tl(cpu_cc_src);
    }
    if (obsolete & USES_CC_SRC2) {
        tcg_gen_discard_tl(cpu_cc_src2);
    }
    if (obsolete & USES_CC_SRCT) {
        tcg_gen_discard_tl(s->cc_srcT);
    }

    /* Leaving CC_OP_DYNAMIC with a pending writeback: the in-memory
       cc_op value is about to be superseded, so discard it. */
    if (dirty && s->cc_op == CC_OP_DYNAMIC) {
        tcg_gen_discard_i32(cpu_cc_op);
    }
    s->cc_op_dirty = dirty;
    s->cc_op = op;
}
|
|
|
|
|
2024-05-17 00:08:40 +03:00
|
|
|
/*
 * Switch to CC operation OP.  The DYNAMIC setting is translator-only;
 * every other value is marked dirty so it will be spilled to
 * cpu_cc_op later.
 */
static void set_cc_op(DisasContext *s, CCOp op)
{
    bool needs_spill = (op != CC_OP_DYNAMIC);
    set_cc_op_1(s, op, needs_spill);
}
|
|
|
|
|
|
|
|
/*
 * Like set_cc_op(), but without marking cpu_cc_op dirty: the caller
 * knows the in-memory value already matches OP, so gen_update_cc_op()
 * will not emit a writeback.
 */
static void assume_cc_op(DisasContext *s, CCOp op)
{
    set_cc_op_1(s, op, false);
}
|
|
|
|
|
2013-01-24 00:34:26 +04:00
|
|
|
/* Spill the translation-time CC operation into cpu_cc_op, if pending. */
static void gen_update_cc_op(DisasContext *s)
{
    if (!s->cc_op_dirty) {
        return;
    }
    tcg_gen_movi_i32(cpu_cc_op, s->cc_op);
    s->cc_op_dirty = false;
}
|
|
|
|
|
2005-01-04 02:50:08 +03:00
|
|
|
#ifdef TARGET_X86_64
|
|
|
|
|
|
|
|
#define NB_OP_SIZES 4
|
|
|
|
|
|
|
|
#else /* !TARGET_X86_64 */
|
|
|
|
|
|
|
|
#define NB_OP_SIZES 3
|
|
|
|
|
|
|
|
#endif /* !TARGET_X86_64 */
|
|
|
|
|
2022-03-23 18:57:17 +03:00
|
|
|
#if HOST_BIG_ENDIAN
|
2008-02-01 13:50:11 +03:00
|
|
|
#define REG_B_OFFSET (sizeof(target_ulong) - 1)
|
|
|
|
#define REG_H_OFFSET (sizeof(target_ulong) - 2)
|
|
|
|
#define REG_W_OFFSET (sizeof(target_ulong) - 2)
|
|
|
|
#define REG_L_OFFSET (sizeof(target_ulong) - 4)
|
|
|
|
#define REG_LH_OFFSET (sizeof(target_ulong) - 8)
|
2005-01-04 02:50:08 +03:00
|
|
|
#else
|
2008-02-01 13:50:11 +03:00
|
|
|
#define REG_B_OFFSET 0
|
|
|
|
#define REG_H_OFFSET 1
|
|
|
|
#define REG_W_OFFSET 0
|
|
|
|
#define REG_L_OFFSET 0
|
|
|
|
#define REG_LH_OFFSET 4
|
2005-01-04 02:50:08 +03:00
|
|
|
#endif
|
2008-02-01 13:50:11 +03:00
|
|
|
|
2012-07-06 01:28:59 +04:00
|
|
|
/* In instruction encodings for byte register accesses the
|
|
|
|
* register number usually indicates "low 8 bits of register N";
|
|
|
|
* however there are some special cases where N 4..7 indicates
|
|
|
|
* [AH, CH, DH, BH], ie "bits 15..8 of register N-4". Return
|
|
|
|
* true for this special case, false otherwise.
|
|
|
|
*/
|
2018-09-11 23:07:54 +03:00
|
|
|
static inline bool byte_reg_is_xH(DisasContext *s, int reg)
|
2012-07-06 01:28:59 +04:00
|
|
|
{
|
2021-05-14 18:13:07 +03:00
|
|
|
/* Any time the REX prefix is present, byte registers are uniform */
|
|
|
|
if (reg < 4 || REX_PREFIX(s)) {
|
2012-07-06 01:28:59 +04:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2013-11-06 03:37:57 +04:00
|
|
|
/* Select the size of a push/pop operation. */
|
2019-08-23 21:10:58 +03:00
|
|
|
static inline MemOp mo_pushpop(DisasContext *s, MemOp ot)
|
2013-11-06 03:37:57 +04:00
|
|
|
{
|
|
|
|
if (CODE64(s)) {
|
|
|
|
return ot == MO_16 ? MO_16 : MO_64;
|
|
|
|
} else {
|
|
|
|
return ot;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-12-17 22:19:18 +03:00
|
|
|
/* Select the size of the stack pointer. */
|
2019-08-23 21:10:58 +03:00
|
|
|
static inline MemOp mo_stacksize(DisasContext *s)
|
2015-12-17 22:19:18 +03:00
|
|
|
{
|
2021-05-14 18:13:03 +03:00
|
|
|
return CODE64(s) ? MO_64 : SS32(s) ? MO_32 : MO_16;
|
2015-12-17 22:19:18 +03:00
|
|
|
}
|
|
|
|
|
2013-11-06 03:37:57 +04:00
|
|
|
/* Select size 8 if lsb of B is clear, else OT.  Used for decoding
   byte vs word opcodes: bit 0 of the opcode selects the word form. */
static inline MemOp mo_b_d(int b, MemOp ot)
{
    return b & 1 ? ot : MO_8;
}
|
|
|
|
|
2022-09-11 15:04:36 +03:00
|
|
|
/* Compute the result of writing t0 to the OT-sized register REG.
 *
 * If DEST is NULL, store the result into the register and return the
 * register's TCGv.
 *
 * If DEST is not NULL, store the result into DEST and return the
 * register's TCGv.
 */
static TCGv gen_op_deposit_reg_v(DisasContext *s, MemOp ot, int reg, TCGv dest, TCGv t0)
{
    switch(ot) {
    case MO_8:
        if (byte_reg_is_xH(s, reg)) {
            /* AH/CH/DH/BH: write bits 15..8 of base register REG - 4. */
            dest = dest ? dest : cpu_regs[reg - 4];
            tcg_gen_deposit_tl(dest, cpu_regs[reg - 4], t0, 8, 8);
            /* Early return: the register TCGv is cpu_regs[reg - 4] here. */
            return cpu_regs[reg - 4];
        }
        dest = dest ? dest : cpu_regs[reg];
        tcg_gen_deposit_tl(dest, cpu_regs[reg], t0, 0, 8);
        break;
    case MO_16:
        dest = dest ? dest : cpu_regs[reg];
        tcg_gen_deposit_tl(dest, cpu_regs[reg], t0, 0, 16);
        break;
    case MO_32:
        /* For x86_64, this sets the higher half of register to zero.
           For i386, this is equivalent to a mov. */
        dest = dest ? dest : cpu_regs[reg];
        tcg_gen_ext32u_tl(dest, t0);
        break;
#ifdef TARGET_X86_64
    case MO_64:
        dest = dest ? dest : cpu_regs[reg];
        tcg_gen_mov_tl(dest, t0);
        break;
#endif
    default:
        g_assert_not_reached();
    }
    return cpu_regs[reg];
}
|
|
|
|
|
|
|
|
/* Store the OT-sized value t0 into register REG, applying x86
 * partial-register merge semantics (see gen_op_deposit_reg_v).
 */
static void gen_op_mov_reg_v(DisasContext *s, MemOp ot, int reg, TCGv t0)
{
    gen_op_deposit_reg_v(s, ot, reg, NULL, t0);
}
|
2003-10-01 00:34:21 +04:00
|
|
|
|
2018-09-11 23:07:54 +03:00
|
|
|
static inline
|
2019-08-23 21:10:58 +03:00
|
|
|
void gen_op_mov_v_reg(DisasContext *s, MemOp ot, TCGv t0, int reg)
|
2008-02-01 13:50:11 +03:00
|
|
|
{
|
2018-09-11 23:07:54 +03:00
|
|
|
if (ot == MO_8 && byte_reg_is_xH(s, reg)) {
|
2016-10-15 19:54:17 +03:00
|
|
|
tcg_gen_extract_tl(t0, cpu_regs[reg - 4], 8, 8);
|
2012-07-06 01:28:59 +04:00
|
|
|
} else {
|
2009-09-29 13:58:04 +04:00
|
|
|
tcg_gen_mov_tl(t0, cpu_regs[reg]);
|
2008-02-01 13:50:11 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Add the immediate VAL to the address register A0, truncating the
 * result to 32 bits when not in 64-bit code.
 */
static void gen_add_A0_im(DisasContext *s, int val)
{
    tcg_gen_addi_tl(s->A0, s->A0, val);
    if (CODE64(s)) {
        return;
    }
    tcg_gen_ext32u_tl(s->A0, s->A0);
}
|
2003-10-01 00:34:21 +04:00
|
|
|
|
2022-10-01 17:09:35 +03:00
|
|
|
/* Set EIP from DEST.  The new PC is not known at translate time,
 * so invalidate pc_save (-1 marks it unknown).
 */
static inline void gen_op_jmp_v(DisasContext *s, TCGv dest)
{
    tcg_gen_mov_tl(cpu_eip, dest);
    s->pc_save = -1;
}
|
|
|
|
|
2018-09-11 21:07:57 +03:00
|
|
|
/* Add the immediate VAL to the SIZE-sized register REG, writing back
 * with partial-register merge semantics.
 */
static inline
void gen_op_add_reg_im(DisasContext *s, MemOp size, int reg, int32_t val)
{
    tcg_gen_addi_tl(s->tmp0, cpu_regs[reg], val);
    gen_op_mov_reg_v(s, size, reg, s->tmp0);
}
|
|
|
|
|
2023-10-19 17:55:39 +03:00
|
|
|
/* Add the TCG value VAL to the SIZE-sized register REG, writing back
 * with partial-register merge semantics.
 */
static inline void gen_op_add_reg(DisasContext *s, MemOp size, int reg, TCGv val)
{
    tcg_gen_add_tl(s->tmp0, cpu_regs[reg], val);
    gen_op_mov_reg_v(s, size, reg, s->tmp0);
}
|
2008-02-01 13:50:11 +03:00
|
|
|
|
2013-10-31 09:04:05 +04:00
|
|
|
/* Little-endian guest load of an IDX-sized value from address A0 into T0.  */
static inline void gen_op_ld_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
{
    tcg_gen_qemu_ld_tl(t0, a0, s->mem_index, idx | MO_LE);
}
|
2003-10-01 00:34:21 +04:00
|
|
|
|
2013-10-31 09:04:05 +04:00
|
|
|
/* Little-endian guest store of the IDX-sized value T0 to address A0.  */
static inline void gen_op_st_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
{
    tcg_gen_qemu_st_tl(t0, a0, s->mem_index, idx | MO_LE);
}
|
2004-01-04 20:35:00 +03:00
|
|
|
|
2013-11-02 21:59:43 +04:00
|
|
|
/* Write back T0 to the rm operand: to memory at A0 when D is OR_TMP0,
 * otherwise to register D.
 */
static inline void gen_op_st_rm_T0_A0(DisasContext *s, int idx, int d)
{
    if (d != OR_TMP0) {
        gen_op_mov_reg_v(s, idx, d, s->T0);
    } else {
        gen_op_st_v(s, idx, s->T0, s->A0);
    }
}
|
|
|
|
|
2022-10-01 17:09:14 +03:00
|
|
|
/* Flush the address of the current instruction into cpu_eip.
 * With CF_PCREL, adjust EIP relative to the last value written;
 * in 64-bit mode write the absolute PC; otherwise write the
 * 32-bit CS-relative value.  Updates pc_save to match.
 */
static void gen_update_eip_cur(DisasContext *s)
{
    assert(s->pc_save != -1);
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        tcg_gen_addi_tl(cpu_eip, cpu_eip, s->base.pc_next - s->pc_save);
    } else if (CODE64(s)) {
        tcg_gen_movi_tl(cpu_eip, s->base.pc_next);
    } else {
        tcg_gen_movi_tl(cpu_eip, (uint32_t)(s->base.pc_next - s->cs_base));
    }
    s->pc_save = s->base.pc_next;
}
|
|
|
|
|
2022-10-01 17:09:15 +03:00
|
|
|
/* Flush the address of the *next* instruction (s->pc) into cpu_eip,
 * using the same PCREL / 64-bit / CS-relative cases as
 * gen_update_eip_cur.  Updates pc_save to match.
 */
static void gen_update_eip_next(DisasContext *s)
{
    assert(s->pc_save != -1);
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        tcg_gen_addi_tl(cpu_eip, cpu_eip, s->pc - s->pc_save);
    } else if (CODE64(s)) {
        tcg_gen_movi_tl(cpu_eip, s->pc);
    } else {
        tcg_gen_movi_tl(cpu_eip, (uint32_t)(s->pc - s->cs_base));
    }
    s->pc_save = s->pc;
}
|
|
|
|
|
2022-10-01 17:09:20 +03:00
|
|
|
/* Length in bytes of the instruction currently being translated.  */
static int cur_insn_len(DisasContext *s)
{
    return s->pc - s->base.pc_next;
}
|
|
|
|
|
|
|
|
/* Current instruction length as a constant TCGv_i32, for helper calls.  */
static TCGv_i32 cur_insn_len_i32(DisasContext *s)
{
    return tcg_constant_i32(cur_insn_len(s));
}
|
|
|
|
|
2022-10-01 17:09:24 +03:00
|
|
|
/* Return the address of the next instruction as a 32-bit TCG value.  */
static TCGv_i32 eip_next_i32(DisasContext *s)
{
    assert(s->pc_save != -1);
    /*
     * This function has two users: lcall_real (always 16-bit mode), and
     * iret_protected (16, 32, or 64-bit mode).  IRET only uses the value
     * when EFLAGS.NT is set, which is illegal in 64-bit mode, which is
     * why passing a 32-bit value isn't broken.  To avoid using this where
     * we shouldn't, return -1 in 64-bit mode so that execution goes into
     * the weeds quickly.
     */
    if (CODE64(s)) {
        return tcg_constant_i32(-1);
    }
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        /* EIP is only known relative to pc_save; adjust at runtime.  */
        TCGv_i32 ret = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(ret, cpu_eip);
        tcg_gen_addi_i32(ret, ret, s->pc - s->pc_save);
        return ret;
    } else {
        return tcg_constant_i32(s->pc - s->cs_base);
    }
}
|
|
|
|
|
|
|
|
/* Return the address of the next instruction as a target-long TCG value.  */
static TCGv eip_next_tl(DisasContext *s)
{
    assert(s->pc_save != -1);
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        /* EIP is only known relative to pc_save; adjust at runtime.  */
        TCGv ret = tcg_temp_new();
        tcg_gen_addi_tl(ret, cpu_eip, s->pc - s->pc_save);
        return ret;
    } else if (CODE64(s)) {
        return tcg_constant_tl(s->pc);
    } else {
        return tcg_constant_tl((uint32_t)(s->pc - s->cs_base));
    }
}
|
|
|
|
|
2022-10-01 17:09:32 +03:00
|
|
|
/* Return the address of the current instruction as a target-long TCG value.  */
static TCGv eip_cur_tl(DisasContext *s)
{
    assert(s->pc_save != -1);
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        /* EIP is only known relative to pc_save; adjust at runtime.  */
        TCGv ret = tcg_temp_new();
        tcg_gen_addi_tl(ret, cpu_eip, s->base.pc_next - s->pc_save);
        return ret;
    } else if (CODE64(s)) {
        return tcg_constant_tl(s->base.pc_next);
    } else {
        return tcg_constant_tl((uint32_t)(s->base.pc_next - s->cs_base));
    }
}
|
|
|
|
|
2022-09-21 15:13:01 +03:00
|
|
|
/* Compute SEG:REG into DEST.  SEG is selected from the override segment
   (OVR_SEG) and the default segment (DEF_SEG).  OVR_SEG may be -1 to
   indicate no override. */
static void gen_lea_v_seg_dest(DisasContext *s, MemOp aflag, TCGv dest, TCGv a0,
                               int def_seg, int ovr_seg)
{
    switch (aflag) {
#ifdef TARGET_X86_64
    case MO_64:
        /* 64-bit addresses never apply the default segment base.  */
        if (ovr_seg < 0) {
            tcg_gen_mov_tl(dest, a0);
            return;
        }
        break;
#endif
    case MO_32:
        /* 32 bit address */
        if (ovr_seg < 0 && ADDSEG(s)) {
            ovr_seg = def_seg;
        }
        if (ovr_seg < 0) {
            tcg_gen_ext32u_tl(dest, a0);
            return;
        }
        break;
    case MO_16:
        /* 16 bit address */
        tcg_gen_ext16u_tl(dest, a0);
        /* From here on, use the truncated offset as the base address.  */
        a0 = dest;
        if (ovr_seg < 0) {
            if (ADDSEG(s)) {
                ovr_seg = def_seg;
            } else {
                return;
            }
        }
        break;
    default:
        g_assert_not_reached();
    }

    if (ovr_seg >= 0) {
        TCGv seg = cpu_seg_base[ovr_seg];

        if (aflag == MO_64) {
            tcg_gen_add_tl(dest, a0, seg);
        } else if (CODE64(s)) {
            /* In long mode, a 32-bit address is zero-extended before
               adding the segment base; no final truncation.  */
            tcg_gen_ext32u_tl(dest, a0);
            tcg_gen_add_tl(dest, dest, seg);
        } else {
            /* Legacy modes: add the base, then wrap to 32 bits.  */
            tcg_gen_add_tl(dest, a0, seg);
            tcg_gen_ext32u_tl(dest, dest);
        }
    }
}
|
|
|
|
|
2022-09-21 15:13:01 +03:00
|
|
|
/* As gen_lea_v_seg_dest, with the result stored into s->A0.  */
static void gen_lea_v_seg(DisasContext *s, MemOp aflag, TCGv a0,
                          int def_seg, int ovr_seg)
{
    gen_lea_v_seg_dest(s, aflag, s->A0, a0, def_seg, ovr_seg);
}
|
|
|
|
|
2015-12-17 22:19:17 +03:00
|
|
|
/* Load A0 with the source address of a string op: seg:ESI, where the
 * default DS segment may be overridden by a prefix.
 */
static inline void gen_string_movl_A0_ESI(DisasContext *s)
{
    gen_lea_v_seg(s, s->aflag, cpu_regs[R_ESI], R_DS, s->override);
}
|
|
|
|
|
|
|
|
/* Load A0 with the destination address of a string op: always ES:EDI,
 * segment overrides do not apply (hence ovr_seg = -1).
 */
static inline void gen_string_movl_A0_EDI(DisasContext *s)
{
    gen_lea_v_seg(s, s->aflag, cpu_regs[R_EDI], R_ES, -1);
}
|
|
|
|
|
2023-10-19 17:55:39 +03:00
|
|
|
/* Return a new temporary holding the per-iteration string-op increment:
 * env->df (the direction flag as a signed value) shifted left by OT,
 * i.e. the element size with the sign given by EFLAGS.DF.
 */
static inline TCGv gen_compute_Dshift(DisasContext *s, MemOp ot)
{
    TCGv dshift = tcg_temp_new();
    tcg_gen_ld32s_tl(dshift, tcg_env, offsetof(CPUX86State, df));
    tcg_gen_shli_tl(dshift, dshift, ot);
    return dshift;
}
|
|
|
|
|
2019-08-23 21:10:58 +03:00
|
|
|
/* Extend SRC according to SIZE and SIGN, writing the result to DST.
 * When DST is NULL a fresh temporary is allocated.  Returns the TCGv
 * holding the extended value; if SIZE is already the full target
 * width, SRC itself is returned and DST is untouched.
 */
static TCGv gen_ext_tl(TCGv dst, TCGv src, MemOp size, bool sign)
{
    MemOp mop = size;

    if (mop == MO_TL) {
        return src;
    }
    if (dst == NULL) {
        dst = tcg_temp_new();
    }
    if (sign) {
        mop |= MO_SIGN;
    }
    tcg_gen_ext_tl(dst, src, mop);
    return dst;
}
|
2007-09-17 12:09:54 +04:00
|
|
|
|
2019-08-23 21:10:58 +03:00
|
|
|
/* Zero-extend REG in place from OT-sized to full width.  */
static void gen_extu(MemOp ot, TCGv reg)
{
    gen_ext_tl(reg, reg, ot, false);
}
|
|
|
|
|
2019-08-23 21:10:58 +03:00
|
|
|
/* Sign-extend REG in place from OT-sized to full width.  */
static void gen_exts(MemOp ot, TCGv reg)
{
    gen_ext_tl(reg, reg, ot, true);
}
|
2003-10-01 00:34:21 +04:00
|
|
|
|
2022-10-01 17:09:30 +03:00
|
|
|
/* Branch to label1 if (E/R)CX, truncated to the address size in
 * s->aflag, compares COND against zero.
 */
static void gen_op_j_ecx(DisasContext *s, TCGCond cond, TCGLabel *label1)
{
    TCGv tmp = gen_ext_tl(NULL, cpu_regs[R_ECX], s->aflag, false);

    tcg_gen_brcondi_tl(cond, tmp, 0, label1);
}
|
|
|
|
|
2022-10-01 17:09:30 +03:00
|
|
|
/* Branch to label1 if the address-sized CX register is zero.  */
static inline void gen_op_jz_ecx(DisasContext *s, TCGLabel *label1)
{
    gen_op_j_ecx(s, TCG_COND_EQ, label1);
}
|
|
|
|
|
|
|
|
/* Branch to label1 if the address-sized CX register is non-zero.  */
static inline void gen_op_jnz_ecx(DisasContext *s, TCGLabel *label1)
{
    gen_op_j_ecx(s, TCG_COND_NE, label1);
}
|
2003-10-01 00:34:21 +04:00
|
|
|
|
2019-08-23 21:10:58 +03:00
|
|
|
/* Emit the OT-sized I/O port read helper: v = in{b,w,l}(n).  */
static void gen_helper_in_func(MemOp ot, TCGv v, TCGv_i32 n)
{
    switch (ot) {
    case MO_8:
        gen_helper_inb(v, tcg_env, n);
        break;
    case MO_16:
        gen_helper_inw(v, tcg_env, n);
        break;
    case MO_32:
        gen_helper_inl(v, tcg_env, n);
        break;
    default:
        /* 64-bit port I/O does not exist on x86.  */
        g_assert_not_reached();
    }
}
|
2003-10-01 00:34:21 +04:00
|
|
|
|
2019-08-23 21:10:58 +03:00
|
|
|
/* Emit the OT-sized I/O port write helper: out{b,w,l}(n, v).  */
static void gen_helper_out_func(MemOp ot, TCGv_i32 v, TCGv_i32 n)
{
    switch (ot) {
    case MO_8:
        gen_helper_outb(tcg_env, v, n);
        break;
    case MO_16:
        gen_helper_outw(tcg_env, v, n);
        break;
    case MO_32:
        gen_helper_outl(tcg_env, v, n);
        break;
    default:
        /* 64-bit port I/O does not exist on x86.  */
        g_assert_not_reached();
    }
}
|
2003-11-13 04:43:28 +03:00
|
|
|
|
2021-05-14 18:13:39 +03:00
|
|
|
/*
 * Validate that access to [port, port + 1<<ot) is allowed.
 * Raise #GP, or VMM exit if not.
 *
 * Returns false when the access is known at translate time to fault
 * (user-mode only), true otherwise.
 */
static bool gen_check_io(DisasContext *s, MemOp ot, TCGv_i32 port,
                         uint32_t svm_flags)
{
#ifdef CONFIG_USER_ONLY
    /*
     * We do not implement the ioperm(2) syscall, so the TSS check
     * will always fail.
     */
    gen_exception_gpf(s);
    return false;
#else
    /* TSS I/O permission bitmap check is needed only when CPL > IOPL
       (or in vm86 mode); the helper raises #GP on failure.  */
    if (PE(s) && (CPL(s) > IOPL(s) || VM86(s))) {
        gen_helper_check_io(tcg_env, port, tcg_constant_i32(1 << ot));
    }
    if (GUEST(s)) {
        /* SVM intercept check; flags describe the access for the VMCB.  */
        gen_update_cc_op(s);
        gen_update_eip_cur(s);
        if (s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) {
            svm_flags |= SVM_IOIO_REP_MASK;
        }
        svm_flags |= 1 << (SVM_IOIO_SIZE_SHIFT + ot);
        gen_helper_svm_check_io(tcg_env, port,
                                tcg_constant_i32(svm_flags),
                                cur_insn_len_i32(s));
    }
    return true;
#endif
}
|
|
|
|
|
2022-10-01 17:09:21 +03:00
|
|
|
/* Emit one iteration of MOVS: copy an OT-sized element from seg:ESI
 * to ES:EDI, then advance both index registers by ±(1 << ot)
 * according to EFLAGS.DF.
 */
static void gen_movs(DisasContext *s, MemOp ot)
{
    TCGv dshift;

    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, s->T0, s->A0);
    gen_string_movl_A0_EDI(s);
    gen_op_st_v(s, ot, s->T0, s->A0);

    dshift = gen_compute_Dshift(s, ot);
    gen_op_add_reg(s, s->aflag, R_ESI, dshift);
    gen_op_add_reg(s, s->aflag, R_EDI, dshift);
}
|
|
|
|
|
2018-09-11 21:48:41 +03:00
|
|
|
/* Record T0 as CC_DST for lazy flags evaluation (single-operand ops).  */
static void gen_op_update1_cc(DisasContext *s)
{
    tcg_gen_mov_tl(cpu_cc_dst, s->T0);
}
|
|
|
|
|
2018-09-11 21:48:41 +03:00
|
|
|
/* Record T1 as CC_SRC and T0 as CC_DST for lazy flags evaluation.  */
static void gen_op_update2_cc(DisasContext *s)
{
    tcg_gen_mov_tl(cpu_cc_src, s->T1);
    tcg_gen_mov_tl(cpu_cc_dst, s->T0);
}
|
|
|
|
|
2023-10-11 17:20:20 +03:00
|
|
|
/* compute all eflags to reg */
static void gen_mov_eflags(DisasContext *s, TCGv reg)
{
    TCGv dst, src1, src2;
    TCGv_i32 cc_op;
    int live, dead;

    /* Fast paths: flags already materialized, or statically known.  */
    if (s->cc_op == CC_OP_EFLAGS) {
        tcg_gen_mov_tl(reg, cpu_cc_src);
        return;
    }
    if (s->cc_op == CC_OP_CLR) {
        tcg_gen_movi_tl(reg, CC_Z | CC_P);
        return;
    }

    dst = cpu_cc_dst;
    src1 = cpu_cc_src;
    src2 = cpu_cc_src2;

    /* Take care to not read values that are not live.  */
    live = cc_op_live[s->cc_op] & ~USES_CC_SRCT;
    dead = live ^ (USES_CC_DST | USES_CC_SRC | USES_CC_SRC2);
    if (dead) {
        /* Substitute a constant zero for each dead input.  */
        TCGv zero = tcg_constant_tl(0);
        if (dead & USES_CC_DST) {
            dst = zero;
        }
        if (dead & USES_CC_SRC) {
            src1 = zero;
        }
        if (dead & USES_CC_SRC2) {
            src2 = zero;
        }
    }

    /* Pass cc_op as a constant when known at translate time.  */
    if (s->cc_op != CC_OP_DYNAMIC) {
        cc_op = tcg_constant_i32(s->cc_op);
    } else {
        cc_op = cpu_cc_op;
    }
    gen_helper_cc_compute_all(reg, dst, src1, src2, cc_op);
}
|
|
|
|
|
|
|
|
/* compute all eflags to cc_src */
static void gen_compute_eflags(DisasContext *s)
{
    gen_mov_eflags(s, cpu_cc_src);
    /* After this, flags live fully materialized in cc_src.  */
    set_cc_op(s, CC_OP_EFLAGS);
}
|
|
|
|
|
2013-01-24 02:21:52 +04:00
|
|
|
/* Recipe for computing a condition without materializing it:
 * the condition holds when REG satisfies COND against either REG2
 * (if use_reg2) or IMM.
 */
typedef struct CCPrepare {
    TCGCond cond;        /* comparison to apply */
    TCGv reg;            /* first operand */
    TCGv reg2;           /* second operand, valid only if use_reg2 */
    target_ulong imm;    /* immediate operand / test mask otherwise */
    bool use_reg2;       /* compare reg against reg2 instead of imm */
    bool no_setcond;     /* reg already holds a 0/non-0 result; no
                            setcond needed to materialize it */
} CCPrepare;
|
|
|
|
|
2023-10-27 06:57:31 +03:00
|
|
|
/* Prepare a test of the sign bit of the SIZE-sized value in SRC:
 * for full-width values a signed "< 0" compare, otherwise a test
 * of the top bit of the sized operand.
 */
static CCPrepare gen_prepare_sign_nz(TCGv src, MemOp size)
{
    if (size != MO_TL) {
        return (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = src,
                             .imm = 1ull << ((8 << size) - 1) };
    }
    return (CCPrepare) { .cond = TCG_COND_LT, .reg = src };
}
|
|
|
|
|
2024-04-10 12:21:01 +03:00
|
|
|
/* compute eflags.C, trying to store it in reg if not NULL */
static CCPrepare gen_prepare_eflags_c(DisasContext *s, TCGv reg)
{
    MemOp size;

    switch (s->cc_op) {
    case CC_OP_SUBB ... CC_OP_SUBQ:
        /* (DATA_TYPE)CC_SRCT < (DATA_TYPE)CC_SRC */
        size = s->cc_op - CC_OP_SUBB;
        /* Zero-extend both operands in place so an unsigned compare
           of the sized values is valid.  */
        gen_ext_tl(s->cc_srcT, s->cc_srcT, size, false);
        gen_ext_tl(cpu_cc_src, cpu_cc_src, size, false);
        return (CCPrepare) { .cond = TCG_COND_LTU, .reg = s->cc_srcT,
                             .reg2 = cpu_cc_src, .use_reg2 = true };

    case CC_OP_ADDB ... CC_OP_ADDQ:
        /* (DATA_TYPE)CC_DST < (DATA_TYPE)CC_SRC */
        size = s->cc_op - CC_OP_ADDB;
        gen_ext_tl(cpu_cc_dst, cpu_cc_dst, size, false);
        gen_ext_tl(cpu_cc_src, cpu_cc_src, size, false);
        return (CCPrepare) { .cond = TCG_COND_LTU, .reg = cpu_cc_dst,
                             .reg2 = cpu_cc_src, .use_reg2 = true };

    case CC_OP_LOGICB ... CC_OP_LOGICQ:
    case CC_OP_CLR:
    case CC_OP_POPCNT:
        /* These ops always clear CF.  */
        return (CCPrepare) { .cond = TCG_COND_NEVER };

    case CC_OP_INCB ... CC_OP_INCQ:
    case CC_OP_DECB ... CC_OP_DECQ:
        /* INC/DEC preserve CF; it is kept live in CC_SRC.  */
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .no_setcond = true };

    case CC_OP_SHLB ... CC_OP_SHLQ:
        /* (CC_SRC >> (DATA_BITS - 1)) & 1 */
        size = s->cc_op - CC_OP_SHLB;
        return gen_prepare_sign_nz(cpu_cc_src, size);

    case CC_OP_MULB ... CC_OP_MULQ:
        return (CCPrepare) { .cond = TCG_COND_NE,
                             .reg = cpu_cc_src };

    case CC_OP_BMILGB ... CC_OP_BMILGQ:
        /* CF set when the sized source was zero.  */
        size = s->cc_op - CC_OP_BMILGB;
        gen_ext_tl(cpu_cc_src, cpu_cc_src, size, false);
        return (CCPrepare) { .cond = TCG_COND_EQ, .reg = cpu_cc_src };

    case CC_OP_ADCX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_dst,
                             .no_setcond = true };

    case CC_OP_EFLAGS:
    case CC_OP_SARB ... CC_OP_SARQ:
        /* CC_SRC & 1 */
        return (CCPrepare) { .cond = TCG_COND_TSTNE,
                             .reg = cpu_cc_src, .imm = CC_C };

    default:
        /* The need to compute only C from CC_OP_DYNAMIC is important
           in efficiently implementing e.g. INC at the start of a TB. */
        gen_update_cc_op(s);
        if (!reg) {
            reg = tcg_temp_new();
        }
        gen_helper_cc_compute_c(reg, cpu_cc_dst, cpu_cc_src,
                                cpu_cc_src2, cpu_cc_op);
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
                             .no_setcond = true };
    }
}
|
|
|
|
|
2024-04-10 12:21:01 +03:00
|
|
|
/* compute eflags.P, trying to store it in reg if not NULL */
static CCPrepare gen_prepare_eflags_p(DisasContext *s, TCGv reg)
{
    /* Parity has no lazy form: materialize all flags, then test CC_P.  */
    gen_compute_eflags(s);
    return (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = cpu_cc_src,
                         .imm = CC_P };
}
|
|
|
|
|
2024-04-10 12:21:01 +03:00
|
|
|
/* compute eflags.S, trying to store it in reg if not NULL */
static CCPrepare gen_prepare_eflags_s(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_DYNAMIC:
        gen_compute_eflags(s);
        /* FALLTHRU */
    case CC_OP_EFLAGS:
    case CC_OP_ADCX:
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        /* Flags are materialized in CC_SRC; test the SF bit.  */
        return (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = cpu_cc_src,
                             .imm = CC_S };
    case CC_OP_CLR:
    case CC_OP_POPCNT:
        /* These ops always clear SF.  */
        return (CCPrepare) { .cond = TCG_COND_NEVER };
    default:
        {
            /* SF is the sign bit of the sized result in CC_DST.  */
            MemOp size = (s->cc_op - CC_OP_ADDB) & 3;
            return gen_prepare_sign_nz(cpu_cc_dst, size);
        }
    }
}
|
|
|
|
|
2024-04-10 12:21:01 +03:00
|
|
|
/* compute eflags.O, trying to store it in reg if not NULL */
static CCPrepare gen_prepare_eflags_o(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        /* OF is kept live in CC_SRC2 for ADOX/ADCX tracking.  */
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src2,
                             .no_setcond = true };
    case CC_OP_CLR:
    case CC_OP_POPCNT:
        /* These ops always clear OF.  */
        return (CCPrepare) { .cond = TCG_COND_NEVER };
    case CC_OP_MULB ... CC_OP_MULQ:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src };
    default:
        /* No cheap lazy form: materialize all flags and test CC_O.  */
        gen_compute_eflags(s);
        return (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = cpu_cc_src,
                             .imm = CC_O };
    }
}
|
|
|
|
|
2024-04-10 12:21:01 +03:00
|
|
|
/* compute eflags.Z, trying to store it in reg if not NULL */
static CCPrepare gen_prepare_eflags_z(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_DYNAMIC:
        gen_compute_eflags(s);
        /* FALLTHRU */
    case CC_OP_EFLAGS:
    case CC_OP_ADCX:
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        /* Flags are materialized in CC_SRC; test the ZF bit.  */
        return (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = cpu_cc_src,
                             .imm = CC_Z };
    case CC_OP_CLR:
        /* CLR sets ZF unconditionally.  */
        return (CCPrepare) { .cond = TCG_COND_ALWAYS };
    case CC_OP_POPCNT:
        /* ZF set iff the popcount source (in CC_SRC) was zero.  */
        return (CCPrepare) { .cond = TCG_COND_EQ, .reg = cpu_cc_src };
    default:
        {
            /* ZF set iff the sized result in CC_DST is zero; for
               sub-word sizes test only the low (8 << size) bits.  */
            MemOp size = (s->cc_op - CC_OP_ADDB) & 3;
            if (size == MO_TL) {
                return (CCPrepare) { .cond = TCG_COND_EQ, .reg = cpu_cc_dst };
            } else {
                return (CCPrepare) { .cond = TCG_COND_TSTEQ, .reg = cpu_cc_dst,
                                     .imm = (1ull << (8 << size)) - 1 };
            }
        }
    }
}
|
|
|
|
|
2024-04-10 12:21:01 +03:00
|
|
|
/* return how to compute jump opcode 'b'. 'reg' can be clobbered
 * if needed; it may be used for CCPrepare.reg if that will
 * provide more freedom in the translation of a subsequent setcond. */
static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg)
{
    int inv, jcc_op, cond;
    MemOp size;
    CCPrepare cc;

    /* Bit 0 of 'b' inverts the condition; bits 1..3 select the test. */
    inv = b & 1;
    jcc_op = (b >> 1) & 7;

    switch (s->cc_op) {
    case CC_OP_SUBB ... CC_OP_SUBQ:
        /* We optimize relational operators for the cmp/jcc case. */
        size = s->cc_op - CC_OP_SUBB;
        switch (jcc_op) {
        case JCC_BE:
            /* Below-or-equal: unsigned compare of the saved operands.
               gen_ext_tl with 'false' presumably zero-extends to 'size'
               — TODO confirm against its definition. */
            gen_ext_tl(s->cc_srcT, s->cc_srcT, size, false);
            gen_ext_tl(cpu_cc_src, cpu_cc_src, size, false);
            cc = (CCPrepare) { .cond = TCG_COND_LEU, .reg = s->cc_srcT,
                               .reg2 = cpu_cc_src, .use_reg2 = true };
            break;
        case JCC_L:
            cond = TCG_COND_LT;
            goto fast_jcc_l;
        case JCC_LE:
            cond = TCG_COND_LE;
        fast_jcc_l:
            /* Signed compares share the sign-extension path. */
            gen_ext_tl(s->cc_srcT, s->cc_srcT, size, true);
            gen_ext_tl(cpu_cc_src, cpu_cc_src, size, true);
            cc = (CCPrepare) { .cond = cond, .reg = s->cc_srcT,
                               .reg2 = cpu_cc_src, .use_reg2 = true };
            break;

        default:
            goto slow_jcc;
        }
        break;

    default:
    slow_jcc:
        /* This actually generates good code for JC, JZ and JS. */
        switch (jcc_op) {
        case JCC_O:
            cc = gen_prepare_eflags_o(s, reg);
            break;
        case JCC_B:
            cc = gen_prepare_eflags_c(s, reg);
            break;
        case JCC_Z:
            cc = gen_prepare_eflags_z(s, reg);
            break;
        case JCC_BE:
            /* BE = CF | ZF: materialize EFLAGS and test both bits. */
            gen_compute_eflags(s);
            cc = (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = cpu_cc_src,
                               .imm = CC_Z | CC_C };
            break;
        case JCC_S:
            cc = gen_prepare_eflags_s(s, reg);
            break;
        case JCC_P:
            cc = gen_prepare_eflags_p(s, reg);
            break;
        case JCC_L:
            /* L = SF != OF.  Shift SF up onto OF by adding CC_O - CC_S,
               so the xor of the two flags lands in the CC_O bit. */
            gen_compute_eflags(s);
            if (!reg || reg == cpu_cc_src) {
                /* Must not clobber cpu_cc_src; use a fresh temp. */
                reg = tcg_temp_new();
            }
            tcg_gen_addi_tl(reg, cpu_cc_src, CC_O - CC_S);
            cc = (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = reg,
                               .imm = CC_O };
            break;
        default:
        case JCC_LE:
            /* LE = (SF != OF) | ZF, using the same SF-onto-OF trick. */
            gen_compute_eflags(s);
            if (!reg || reg == cpu_cc_src) {
                reg = tcg_temp_new();
            }
            tcg_gen_addi_tl(reg, cpu_cc_src, CC_O - CC_S);
            cc = (CCPrepare) { .cond = TCG_COND_TSTNE, .reg = reg,
                               .imm = CC_O | CC_Z };
            break;
        }
        break;
    }

    if (inv) {
        cc.cond = tcg_invert_cond(cc.cond);
    }
    return cc;
}
|
|
|
|
|
2012-10-08 11:42:48 +04:00
|
|
|
/*
 * Store condition 'b' (jcc encoding) as 0/1 into 'reg', using the
 * cheapest form gen_prepare_cc could produce.
 */
static void gen_setcc1(DisasContext *s, int b, TCGv reg)
{
    CCPrepare cc = gen_prepare_cc(s, b, reg);

    if (cc.no_setcond) {
        /* cc.reg already holds the 0/1 value; EQ means it must be
           logically inverted (xor with 1). */
        if (cc.cond == TCG_COND_EQ) {
            tcg_gen_xori_tl(reg, cc.reg, 1);
        } else {
            tcg_gen_mov_tl(reg, cc.reg);
        }
        return;
    }

    /* General case: emit a setcond against either a second register
       or an immediate, as prepared above. */
    if (cc.use_reg2) {
        tcg_gen_setcond_tl(cc.cond, reg, cc.reg, cc.reg2);
    } else {
        tcg_gen_setcondi_tl(cc.cond, reg, cc.reg, cc.imm);
    }
}
|
|
|
|
|
|
|
|
/* Compute the carry flag (CF) as 0/1 into 'reg'; JCC_B tests exactly CF. */
static inline void gen_compute_eflags_c(DisasContext *s, TCGv reg)
{
    gen_setcc1(s, JCC_B << 1, reg);
}
|
2013-01-24 02:33:45 +04:00
|
|
|
|
2008-05-21 23:16:45 +04:00
|
|
|
/* generate a conditional jump to label 'l1' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used.
   Unlike gen_jcc1, does not update cc_op: only for use when the branch
   stays within the current translation block. */
static inline void gen_jcc1_noeob(DisasContext *s, int b, TCGLabel *l1)
{
    CCPrepare cc = gen_prepare_cc(s, b, NULL);

    if (cc.use_reg2) {
        tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
    } else {
        tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
    }
}
|
|
|
|
|
|
|
|
/* Generate a conditional jump to label 'l1' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used.
   One or both of the branches will call gen_jmp_rel, so ensure
   cc_op is clean. */
static inline void gen_jcc1(DisasContext *s, int b, TCGLabel *l1)
{
    CCPrepare cc = gen_prepare_cc(s, b, NULL);

    /* Both branch targets may leave the TB; flush cc_op to env first. */
    gen_update_cc_op(s);
    if (cc.use_reg2) {
        tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
    } else {
        tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
    }
}
|
|
|
|
|
2005-01-04 02:50:08 +03:00
|
|
|
/* XXX: does not work with gdbstub "ice" single step - not a
   serious problem. The caller can jump to the returned label
   to stop the REP but, if the flags have changed, it has to call
   gen_update_cc_op before doing so. */
/*
 * Emit the "exit REP loop if ECX == 0" prologue for string instructions.
 * Returns the label (l2) that jumps past the string op to the next insn.
 */
static TCGLabel *gen_jz_ecx_string(DisasContext *s)
{
    TCGLabel *l1 = gen_new_label();
    TCGLabel *l2 = gen_new_label();

    gen_update_cc_op(s);
    /* ECX != 0: fall into the string operation at l1. */
    gen_op_jnz_ecx(s, l1);
    gen_set_label(l2);
    /* ECX == 0 path: jump to the next instruction (size-0 relative). */
    gen_jmp_rel_csize(s, 0, 1);
    gen_set_label(l1);
    return l2;
}
|
|
|
|
|
2022-10-01 17:09:21 +03:00
|
|
|
/* STOS: store AL/AX/EAX/RAX (in T0) at ES:[EDI], then step EDI by
   the direction-flag-adjusted element size. */
static void gen_stos(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_EDI(s);
    gen_op_st_v(s, ot, s->T0, s->A0);
    gen_op_add_reg(s, s->aflag, R_EDI, gen_compute_Dshift(s, ot));
}
|
|
|
|
|
2022-10-01 17:09:21 +03:00
|
|
|
/* LODS: load from DS:[ESI] into AL/AX/EAX/RAX, then step ESI. */
static void gen_lods(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, s->T0, s->A0);
    gen_op_mov_reg_v(s, ot, R_EAX, s->T0);
    gen_op_add_reg(s, s->aflag, R_ESI, gen_compute_Dshift(s, ot));
}
|
|
|
|
|
2022-10-01 17:09:21 +03:00
|
|
|
/* SCAS: compare accumulator (T0) with memory at ES:[EDI], setting the
   lazy flags exactly as CMP would, then step EDI. */
static void gen_scas(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_EDI(s);
    gen_op_ld_v(s, ot, s->T1, s->A0);
    /* Record both operands and the difference for lazy flag evaluation. */
    tcg_gen_mov_tl(cpu_cc_src, s->T1);
    tcg_gen_mov_tl(s->cc_srcT, s->T0);
    tcg_gen_sub_tl(cpu_cc_dst, s->T0, s->T1);
    set_cc_op(s, CC_OP_SUBB + ot);

    gen_op_add_reg(s, s->aflag, R_EDI, gen_compute_Dshift(s, ot));
}
|
|
|
|
|
2022-10-01 17:09:21 +03:00
|
|
|
/* CMPS: compare DS:[ESI] with ES:[EDI], set lazy flags like CMP,
   then step both index registers by the same direction-scaled amount. */
static void gen_cmps(DisasContext *s, MemOp ot)
{
    TCGv dshift;

    gen_string_movl_A0_EDI(s);
    gen_op_ld_v(s, ot, s->T1, s->A0);
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, s->T0, s->A0);
    /* Record operands and difference for lazy flag evaluation. */
    tcg_gen_mov_tl(cpu_cc_src, s->T1);
    tcg_gen_mov_tl(s->cc_srcT, s->T0);
    tcg_gen_sub_tl(cpu_cc_dst, s->T0, s->T1);
    set_cc_op(s, CC_OP_SUBB + ot);

    /* Compute the shift once; it is shared by both index updates. */
    dshift = gen_compute_Dshift(s, ot);
    gen_op_add_reg(s, s->aflag, R_ESI, dshift);
    gen_op_add_reg(s, s->aflag, R_EDI, dshift);
}
|
|
|
|
|
2015-10-19 20:14:35 +03:00
|
|
|
/* Emit an I/O-breakpoint check helper call after an IN/OUT access,
   but only when the CPU has I/O breakpoints armed (HF_IOBPT_MASK). */
static void gen_bpt_io(DisasContext *s, TCGv_i32 t_port, int ot)
{
    if (s->flags & HF_IOBPT_MASK) {
#ifdef CONFIG_USER_ONLY
        /* user-mode cpu should not be in IOBPT mode */
        g_assert_not_reached();
#else
        /* Access width in bytes; EIP of the next insn for the debug trap. */
        TCGv_i32 t_size = tcg_constant_i32(1 << ot);
        TCGv t_next = eip_next_tl(s);
        gen_helper_bpt_io(tcg_env, t_port, t_size, t_next);
#endif /* CONFIG_USER_ONLY */
    }
}
|
|
|
|
|
2022-10-01 17:09:21 +03:00
|
|
|
/* INS: read from I/O port DX into ES:[EDI], step EDI, and check
   I/O breakpoints. */
static void gen_ins(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_EDI(s);
    /* Note: we must do this dummy write first to be restartable in
       case of page fault. */
    tcg_gen_movi_tl(s->T0, 0);
    gen_op_st_v(s, ot, s->T0, s->A0);
    /* Port number is the low 16 bits of DX. */
    tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
    tcg_gen_andi_i32(s->tmp2_i32, s->tmp2_i32, 0xffff);
    gen_helper_in_func(ot, s->T0, s->tmp2_i32);
    gen_op_st_v(s, ot, s->T0, s->A0);
    gen_op_add_reg(s, s->aflag, R_EDI, gen_compute_Dshift(s, ot));
    gen_bpt_io(s, s->tmp2_i32, ot);
}
|
|
|
|
|
2022-10-01 17:09:21 +03:00
|
|
|
/* OUTS: write DS:[ESI] to I/O port DX, step ESI, and check
   I/O breakpoints. */
static void gen_outs(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, s->T0, s->A0);

    /* Port number is the low 16 bits of DX. */
    tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
    tcg_gen_andi_i32(s->tmp2_i32, s->tmp2_i32, 0xffff);
    tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T0);
    gen_helper_out_func(ot, s->tmp2_i32, s->tmp3_i32);
    gen_op_add_reg(s, s->aflag, R_ESI, gen_compute_Dshift(s, ot));
    gen_bpt_io(s, s->tmp2_i32, ot);
}
|
|
|
|
|
2022-10-01 17:09:21 +03:00
|
|
|
/* Generate jumps to current or next instruction */
/*
 * REP prefix wrapper for string ops with no flag-based termination
 * (MOVS/STOS/LODS/INS/OUTS): run one iteration, decrement ECX, and
 * loop back to the same instruction unless ECX reached zero.
 */
static void gen_repz(DisasContext *s, MemOp ot,
                     void (*fn)(DisasContext *s, MemOp ot))
{
    TCGLabel *l2;
    l2 = gen_jz_ecx_string(s);
    fn(s, ot);
    gen_op_add_reg_im(s, s->aflag, R_ECX, -1);
    /*
     * A loop would cause two single step exceptions if ECX = 1
     * before rep string_insn
     */
    if (s->repz_opt) {
        gen_op_jz_ecx(s, l2);
    }
    /* Re-execute this same instruction for the next iteration. */
    gen_jmp_rel_csize(s, -cur_insn_len(s), 0);
}
|
|
|
|
|
2022-10-01 17:09:21 +03:00
|
|
|
/* Define gen_repz_<op> as the REP-prefixed form of gen_<op>. */
#define GEN_REPZ(op) \
    static inline void gen_repz_ ## op(DisasContext *s, MemOp ot)    \
    { gen_repz(s, ot, gen_##op); }
|
|
|
|
|
|
|
|
/*
 * REPE/REPNE wrapper for flag-terminating string ops (SCAS/CMPS):
 * one iteration per translation block, terminating on ECX == 0 or on
 * the ZF condition selected by 'nz' (0 = REPE, 1 = REPNE).
 */
static void gen_repz2(DisasContext *s, MemOp ot, int nz,
                      void (*fn)(DisasContext *s, MemOp ot))
{
    TCGLabel *l2;
    l2 = gen_jz_ecx_string(s);
    fn(s, ot);
    gen_op_add_reg_im(s, s->aflag, R_ECX, -1);
    /* Exit the loop when ZF disagrees with the REPE/REPNE sense. */
    gen_jcc1(s, (JCC_Z << 1) | (nz ^ 1), l2);
    if (s->repz_opt) {
        gen_op_jz_ecx(s, l2);
    }
    /*
     * Only one iteration is done at a time, so the translation
     * block ends unconditionally after this instruction and there
     * is no control flow junction - no need to set CC_OP_DYNAMIC.
     */
    gen_jmp_rel_csize(s, -cur_insn_len(s), 0);
}
|
|
|
|
|
|
|
|
/* Define gen_repz_<op> for REPE/REPNE-prefixed SCAS/CMPS variants. */
#define GEN_REPZ2(op) \
    static inline void gen_repz_ ## op(DisasContext *s, MemOp ot, int nz) \
    { gen_repz2(s, ot, nz, gen_##op); }
|
|
|
|
|
2003-10-01 00:34:21 +04:00
|
|
|
/* Instantiate the REP wrappers for each string instruction. */
GEN_REPZ(movs)
GEN_REPZ(stos)
GEN_REPZ(lods)
GEN_REPZ(ins)
GEN_REPZ(outs)
/* SCAS/CMPS additionally terminate on ZF, hence the two-arg variant. */
GEN_REPZ2(scas)
GEN_REPZ2(cmps)
|
|
|
|
|
2008-11-17 17:43:54 +03:00
|
|
|
/*
 * Dispatch an x87 arithmetic helper ST0 = ST0 <op> FT0, where 'op' is
 * the 3-bit reg field of the FPU opcode (0=add, 1=mul, 2/3=com,
 * 4=sub, 5=subr, 6=div, 7=divr).
 */
static void gen_helper_fp_arith_ST0_FT0(int op)
{
    switch (op) {
    case 0:
        gen_helper_fadd_ST0_FT0(tcg_env);
        break;
    case 1:
        gen_helper_fmul_ST0_FT0(tcg_env);
        break;
    case 2:
        gen_helper_fcom_ST0_FT0(tcg_env);
        break;
    case 3:
        /* FCOM and FCOMP compare identically here; presumably the pop
           for op 3 is emitted by the caller — TODO confirm. */
        gen_helper_fcom_ST0_FT0(tcg_env);
        break;
    case 4:
        gen_helper_fsub_ST0_FT0(tcg_env);
        break;
    case 5:
        gen_helper_fsubr_ST0_FT0(tcg_env);
        break;
    case 6:
        gen_helper_fdiv_ST0_FT0(tcg_env);
        break;
    case 7:
        gen_helper_fdivr_ST0_FT0(tcg_env);
        break;
    }
}
|
2003-10-01 00:34:21 +04:00
|
|
|
|
|
|
|
/* NOTE the exception in "r" op ordering */
/*
 * Dispatch ST(opreg) = ST(opreg) <op> ST0.  For the non-commutative
 * ops (sub/div) the direct and reversed helpers are deliberately
 * swapped relative to gen_helper_fp_arith_ST0_FT0: op 4 -> fsubr,
 * op 5 -> fsub, op 6 -> fdivr, op 7 -> fdiv.
 */
static void gen_helper_fp_arith_STN_ST0(int op, int opreg)
{
    TCGv_i32 tmp = tcg_constant_i32(opreg);
    switch (op) {
    case 0:
        gen_helper_fadd_STN_ST0(tcg_env, tmp);
        break;
    case 1:
        gen_helper_fmul_STN_ST0(tcg_env, tmp);
        break;
    case 4:
        gen_helper_fsubr_STN_ST0(tcg_env, tmp);
        break;
    case 5:
        gen_helper_fsub_STN_ST0(tcg_env, tmp);
        break;
    case 6:
        gen_helper_fdivr_STN_ST0(tcg_env, tmp);
        break;
    case 7:
        gen_helper_fdiv_STN_ST0(tcg_env, tmp);
        break;
    }
}
|
2003-10-01 00:34:21 +04:00
|
|
|
|
2022-10-01 17:09:12 +03:00
|
|
|
/* Raise guest exception 'trapno' at the current instruction: flush
   cc_op and EIP to env, call the raise helper, and end the TB. */
static void gen_exception(DisasContext *s, int trapno)
{
    gen_update_cc_op(s);
    gen_update_eip_cur(s);
    gen_helper_raise_exception(tcg_env, tcg_constant_i32(trapno));
    s->base.is_jmp = DISAS_NORETURN;
}
|
|
|
|
|
|
|
|
/* Generate #UD for the current instruction. The assumption here is that
   the instruction is known, but it isn't allowed in the current cpu mode. */
static void gen_illegal_opcode(DisasContext *s)
{
    gen_exception(s, EXCP06_ILLOP);
}
|
|
|
|
|
2021-05-14 18:12:53 +03:00
|
|
|
/* Generate #GP for the current instruction. */
static void gen_exception_gpf(DisasContext *s)
{
    gen_exception(s, EXCP0D_GPF);
}
|
|
|
|
|
2021-05-14 18:12:54 +03:00
|
|
|
/* Check for cpl == 0; if not, raise #GP and return false. */
static bool check_cpl0(DisasContext *s)
{
    if (CPL(s) == 0) {
        return true;
    }
    gen_exception_gpf(s);
    return false;
}
|
|
|
|
|
2019-08-23 21:10:58 +03:00
|
|
|
/*
 * Update the lazy condition-code state after a variable-count shift.
 * Because a zero shift count leaves both the result and the flags
 * unchanged, every store below is made conditional on count != 0,
 * and cc_op itself becomes dynamic.
 */
static void gen_shift_flags(DisasContext *s, MemOp ot, TCGv result,
                            TCGv shm1, TCGv count, bool is_right)
{
    TCGv_i32 z32, s32, oldop;
    TCGv z_tl;

    /* Store the results into the CC variables. If we know that the
       variable must be dead, store unconditionally. Otherwise we'll
       need to not disrupt the current contents. */
    z_tl = tcg_constant_tl(0);
    if (cc_op_live[s->cc_op] & USES_CC_DST) {
        tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_dst, count, z_tl,
                           result, cpu_cc_dst);
    } else {
        tcg_gen_mov_tl(cpu_cc_dst, result);
    }
    if (cc_op_live[s->cc_op] & USES_CC_SRC) {
        tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_src, count, z_tl,
                           shm1, cpu_cc_src);
    } else {
        tcg_gen_mov_tl(cpu_cc_src, shm1);
    }

    /* Get the two potential CC_OP values into temporaries. */
    tcg_gen_movi_i32(s->tmp2_i32, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot);
    if (s->cc_op == CC_OP_DYNAMIC) {
        oldop = cpu_cc_op;
    } else {
        tcg_gen_movi_i32(s->tmp3_i32, s->cc_op);
        oldop = s->tmp3_i32;
    }

    /* Conditionally store the CC_OP value. */
    z32 = tcg_constant_i32(0);
    s32 = tcg_temp_new_i32();
    tcg_gen_trunc_tl_i32(s32, count);
    tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, s32, z32, s->tmp2_i32, oldop);

    /* The CC_OP value is no longer predictable. */
    set_cc_op(s, CC_OP_DYNAMIC);
}
|
|
|
|
|
2008-05-17 16:44:31 +04:00
|
|
|
/* XXX: add faster immediate case */
/*
 * SHLD/SHRD with a variable count: shift the double-width value formed
 * from T0:T1 and leave the result in T0.  'op1' selects the destination
 * (memory via A0 when OR_TMP0, otherwise a register); 'count_in' is the
 * raw count, masked here to 5 or 6 bits depending on operand size.
 */
static void gen_shiftd_rm_T1(DisasContext *s, MemOp ot, int op1,
                             bool is_right, TCGv count_in)
{
    target_ulong mask = (ot == MO_64 ? 63 : 31);
    TCGv count;

    /* load */
    if (op1 == OR_TMP0) {
        gen_op_ld_v(s, ot, s->T0, s->A0);
    } else {
        gen_op_mov_v_reg(s, ot, s->T0, op1);
    }

    count = tcg_temp_new();
    tcg_gen_andi_tl(count, count_in, mask);

    switch (ot) {
    case MO_16:
        /* Note: we implement the Intel behaviour for shift count > 16.
           This means "shrdw C, B, A" shifts A:B:A >> C. Build the B:A
           portion by constructing it as a 32-bit value. */
        if (is_right) {
            tcg_gen_deposit_tl(s->tmp0, s->T0, s->T1, 16, 16);
            tcg_gen_mov_tl(s->T1, s->T0);
            tcg_gen_mov_tl(s->T0, s->tmp0);
        } else {
            tcg_gen_deposit_tl(s->T1, s->T0, s->T1, 16, 16);
        }
        /*
         * If TARGET_X86_64 defined then fall through into MO_32 case,
         * otherwise fall through default case.
         */
    case MO_32:
#ifdef TARGET_X86_64
        /* Concatenate the two 32-bit values and use a 64-bit shift. */
        tcg_gen_subi_tl(s->tmp0, count, 1);
        if (is_right) {
            tcg_gen_concat_tl_i64(s->T0, s->T0, s->T1);
            /* tmp0 = result shifted by (count - 1), from which the flag
               computation later extracts the last bit shifted out. */
            tcg_gen_shr_i64(s->tmp0, s->T0, s->tmp0);
            tcg_gen_shr_i64(s->T0, s->T0, count);
        } else {
            tcg_gen_concat_tl_i64(s->T0, s->T1, s->T0);
            tcg_gen_shl_i64(s->tmp0, s->T0, s->tmp0);
            tcg_gen_shl_i64(s->T0, s->T0, count);
            tcg_gen_shri_i64(s->tmp0, s->tmp0, 32);
            tcg_gen_shri_i64(s->T0, s->T0, 32);
        }
        break;
#endif
    default:
        tcg_gen_subi_tl(s->tmp0, count, 1);
        if (is_right) {
            /* tmp0 holds the value shifted by (count - 1) for CF. */
            tcg_gen_shr_tl(s->tmp0, s->T0, s->tmp0);

            /* Bits vacated in T0 are filled from T1. */
            tcg_gen_subfi_tl(s->tmp4, mask + 1, count);
            tcg_gen_shr_tl(s->T0, s->T0, count);
            tcg_gen_shl_tl(s->T1, s->T1, s->tmp4);
        } else {
            tcg_gen_shl_tl(s->tmp0, s->T0, s->tmp0);
            if (ot == MO_16) {
                /* Only needed if count > 16, for Intel behaviour. */
                tcg_gen_subfi_tl(s->tmp4, 33, count);
                tcg_gen_shr_tl(s->tmp4, s->T1, s->tmp4);
                tcg_gen_or_tl(s->tmp0, s->tmp0, s->tmp4);
            }

            tcg_gen_subfi_tl(s->tmp4, mask + 1, count);
            tcg_gen_shl_tl(s->T0, s->T0, count);
            tcg_gen_shr_tl(s->T1, s->T1, s->tmp4);
        }
        /* When count == 0 the fill portion must be zero, otherwise the
           (mask + 1)-bit shift above would have produced garbage. */
        tcg_gen_movi_tl(s->tmp4, 0);
        tcg_gen_movcond_tl(TCG_COND_EQ, s->T1, count, s->tmp4,
                           s->tmp4, s->T1);
        tcg_gen_or_tl(s->T0, s->T0, s->T1);
        break;
    }

    /* store */
    gen_op_st_rm_T0_A0(s, ot, op1);

    gen_shift_flags(s, ot, s->T0, s->tmp0, count, is_right);
}
|
|
|
|
|
target/i386: trap on instructions longer than >15 bytes
Besides being more correct, arbitrarily long instruction allow the
generation of a translation block that spans three pages. This
confuses the generator and even allows ring 3 code to poison the
translation block cache and inject code into other processes that are
in guest ring 3.
This is an improved (and more invasive) fix for commit 30663fd ("tcg/i386:
Check the size of instruction being translated", 2017-03-24). In addition
to being more precise (and generating the right exception, which is #GP
rather than #UD), it distinguishes better between page faults and too long
instructions, as shown by this test case:
#include <sys/mman.h>
#include <string.h>
#include <stdio.h>
int main()
{
char *x = mmap(NULL, 8192, PROT_READ|PROT_WRITE|PROT_EXEC,
MAP_PRIVATE|MAP_ANON, -1, 0);
memset(x, 0x66, 4096);
x[4096] = 0x90;
x[4097] = 0xc3;
char *i = x + 4096 - 15;
mprotect(x + 4096, 4096, PROT_READ|PROT_WRITE);
((void(*)(void)) i) ();
}
... which produces a #GP without the mprotect, and a #PF with it.
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
2017-03-22 13:57:10 +03:00
|
|
|
/* Architectural limit: an x86 instruction may not exceed 15 bytes. */
#define X86_MAX_INSN_LENGTH 15

/*
 * Advance the decode pointer by num_bytes and return the previous pc.
 * Aborts translation via siglongjmp when the fetch would cross a page
 * into a new page mid-TB (jmpbuf code 2) or when the accumulated
 * instruction length exceeds the architectural maximum (jmpbuf code 1,
 * which leads to #GP).
 */
static uint64_t advance_pc(CPUX86State *env, DisasContext *s, int num_bytes)
{
    uint64_t pc = s->pc;

    /* This is a subsequent insn that crosses a page boundary. */
    if (s->base.num_insns > 1 &&
        !is_same_page(&s->base, s->pc + num_bytes - 1)) {
        siglongjmp(s->jmpbuf, 2);
    }

    s->pc += num_bytes;
    if (unlikely(cur_insn_len(s) > X86_MAX_INSN_LENGTH)) {
        /* If the instruction's 16th byte is on a different page than the 1st, a
         * page fault on the second page wins over the general protection fault
         * caused by the instruction being too long.
         * This can happen even if the operand is only one byte long!
         */
        if (((s->pc - 1) ^ (pc - 1)) & TARGET_PAGE_MASK) {
            /* Probe the second page so a #PF is raised if it is unmapped. */
            (void)translator_ldub(env, &s->base,
                                  (s->pc - 1) & TARGET_PAGE_MASK);
        }
        siglongjmp(s->jmpbuf, 1);
    }

    return pc;
}
|
|
|
|
|
|
|
|
/* Fetch helpers: read 1/2/4/8 bytes of guest code at the current pc,
   advancing it with all the length/page checks done by advance_pc. */
static inline uint8_t x86_ldub_code(CPUX86State *env, DisasContext *s)
{
    return translator_ldub(env, &s->base, advance_pc(env, s, 1));
}

static inline uint16_t x86_lduw_code(CPUX86State *env, DisasContext *s)
{
    return translator_lduw(env, &s->base, advance_pc(env, s, 2));
}

static inline uint32_t x86_ldl_code(CPUX86State *env, DisasContext *s)
{
    return translator_ldl(env, &s->base, advance_pc(env, s, 4));
}

#ifdef TARGET_X86_64
static inline uint64_t x86_ldq_code(CPUX86State *env, DisasContext *s)
{
    return translator_ldq(env, &s->base, advance_pc(env, s, 8));
}
#endif
|
|
|
|
|
2015-07-09 10:22:01 +03:00
|
|
|
/* Decompose an address. */
|
|
|
|
|
|
|
|
typedef struct AddressParts {
|
|
|
|
int def_seg;
|
|
|
|
int base;
|
|
|
|
int index;
|
|
|
|
int scale;
|
|
|
|
target_long disp;
|
|
|
|
} AddressParts;
|
|
|
|
|
|
|
|
/*
 * Decode the addressing bytes that follow a modrm byte (optional SIB
 * byte plus displacement) into an AddressParts structure.  This only
 * consumes instruction bytes via the x86_ld*_code helpers; no TCG
 * code is generated here.
 */
static AddressParts gen_lea_modrm_0(CPUX86State *env, DisasContext *s,
                                    int modrm)
{
    int def_seg, base, index, scale, mod, rm;
    target_long disp;
    bool havesib;

    def_seg = R_DS;
    index = -1;
    scale = 0;
    disp = 0;

    mod = (modrm >> 6) & 3;
    rm = modrm & 7;
    base = rm | REX_B(s);

    if (mod == 3) {
        /* Normally filtered out earlier, but including this path
           simplifies multi-byte nop, as well as bndcl, bndcu, bndcn.  */
        goto done;
    }

    switch (s->aflag) {
    case MO_64:
    case MO_32:
        havesib = 0;
        if (rm == 4) {
            /* rm == 4 selects a SIB byte: scale/index/base fields. */
            int code = x86_ldub_code(env, s);
            scale = (code >> 6) & 3;
            index = ((code >> 3) & 7) | REX_X(s);
            if (index == 4) {
                index = -1; /* no index */
            }
            base = (code & 7) | REX_B(s);
            havesib = 1;
        }

        switch (mod) {
        case 0:
            if ((base & 7) == 5) {
                /* mod == 0 with base field 5: disp32 and no base
                   register ... */
                base = -1;
                disp = (int32_t)x86_ldl_code(env, s);
                if (CODE64(s) && !havesib) {
                    /* ... except in 64-bit mode without a SIB byte,
                       where the encoding is RIP-relative. */
                    base = -2;
                    disp += s->pc + s->rip_offset;
                }
            }
            break;
        case 1:
            disp = (int8_t)x86_ldub_code(env, s);
            break;
        default:
        case 2:
            disp = (int32_t)x86_ldl_code(env, s);
            break;
        }

        /* For correct popl handling with esp.  */
        if (base == R_ESP && s->popl_esp_hack) {
            disp += s->popl_esp_hack;
        }
        /* EBP- and ESP-based addressing defaults to the SS segment. */
        if (base == R_EBP || base == R_ESP) {
            def_seg = R_SS;
        }
        break;

    case MO_16:
        if (mod == 0) {
            if (rm == 6) {
                /* mod 0, rm 6: absolute disp16, no registers at all. */
                base = -1;
                disp = x86_lduw_code(env, s);
                break;
            }
        } else if (mod == 1) {
            disp = (int8_t)x86_ldub_code(env, s);
        } else {
            disp = (int16_t)x86_lduw_code(env, s);
        }

        /* 16-bit addressing: rm selects a fixed base/index pair. */
        switch (rm) {
        case 0:
            base = R_EBX;
            index = R_ESI;
            break;
        case 1:
            base = R_EBX;
            index = R_EDI;
            break;
        case 2:
            base = R_EBP;
            index = R_ESI;
            def_seg = R_SS;
            break;
        case 3:
            base = R_EBP;
            index = R_EDI;
            def_seg = R_SS;
            break;
        case 4:
            base = R_ESI;
            break;
        case 5:
            base = R_EDI;
            break;
        case 6:
            base = R_EBP;
            def_seg = R_SS;
            break;
        default:
        case 7:
            base = R_EBX;
            break;
        }
        break;

    default:
        g_assert_not_reached();
    }

 done:
    return (AddressParts){ def_seg, base, index, scale, disp };
}
|
|
|
|
|
2015-07-09 10:22:01 +03:00
|
|
|
/* Compute the address, with a minimum number of TCG ops.  */
static TCGv gen_lea_modrm_1(DisasContext *s, AddressParts a, bool is_vsib)
{
    TCGv ea = NULL;

    /* The index register is skipped for VSIB forms: the caller supplies
       the vector index itself. */
    if (a.index >= 0 && !is_vsib) {
        if (a.scale == 0) {
            /* Unscaled index can be used directly. */
            ea = cpu_regs[a.index];
        } else {
            tcg_gen_shli_tl(s->A0, cpu_regs[a.index], a.scale);
            ea = s->A0;
        }
        if (a.base >= 0) {
            tcg_gen_add_tl(s->A0, ea, cpu_regs[a.base]);
            ea = s->A0;
        }
    } else if (a.base >= 0) {
        ea = cpu_regs[a.base];
    }
    if (!ea) {
        /* No base and no index: the address is just the displacement
           (or RIP + disp for the base == -2 encoding). */
        if (tb_cflags(s->base.tb) & CF_PCREL && a.base == -2) {
            /* With cpu_eip ~= pc_save, the expression is pc-relative. */
            tcg_gen_addi_tl(s->A0, cpu_eip, a.disp - s->pc_save);
        } else {
            tcg_gen_movi_tl(s->A0, a.disp);
        }
        ea = s->A0;
    } else if (a.disp != 0) {
        tcg_gen_addi_tl(s->A0, ea, a.disp);
        ea = s->A0;
    }

    /* NOTE: may return a cpu_regs[] TCGv directly; callers must not
       modify the result unless it is s->A0. */
    return ea;
}
|
2013-11-06 02:27:33 +04:00
|
|
|
|
2015-07-09 10:22:01 +03:00
|
|
|
/*
 * Decode the modrm memory operand and leave the fully segmented
 * effective address in s->A0.
 */
static void gen_lea_modrm(CPUX86State *env, DisasContext *s, int modrm)
{
    AddressParts parts;
    TCGv addr;

    parts = gen_lea_modrm_0(env, s, modrm);
    addr = gen_lea_modrm_1(s, parts, false);
    gen_lea_v_seg(s, s->aflag, addr, parts.def_seg, s->override);
}
|
|
|
|
|
|
|
|
/* Decode and discard a modrm memory operand: used by instructions such
   as multi-byte NOP that carry a modrm byte but perform no access. */
static void gen_nop_modrm(CPUX86State *env, DisasContext *s, int modrm)
{
    (void)gen_lea_modrm_0(env, s, modrm);
}
|
|
|
|
|
2015-07-06 21:37:00 +03:00
|
|
|
/* Used for BNDCL, BNDCU, BNDCN.  Compares the effective address of the
   modrm operand against bound BNDV with COND and raises #BR via the
   helper if the check fails. */
static void gen_bndck(CPUX86State *env, DisasContext *s, int modrm,
                      TCGCond cond, TCGv_i64 bndv)
{
    AddressParts a = gen_lea_modrm_0(env, s, modrm);
    TCGv ea = gen_lea_modrm_1(s, a, false);

    tcg_gen_extu_tl_i64(s->tmp1_i64, ea);
    if (!CODE64(s)) {
        /* Outside 64-bit mode only the low 32 address bits matter. */
        tcg_gen_ext32u_i64(s->tmp1_i64, s->tmp1_i64);
    }
    tcg_gen_setcond_i64(cond, s->tmp1_i64, s->tmp1_i64, bndv);
    tcg_gen_extrl_i64_i32(s->tmp2_i32, s->tmp1_i64);
    /* Non-zero comparison result -> helper raises the bound fault. */
    gen_helper_bndck(tcg_env, s->tmp2_i32);
}
|
|
|
|
|
2005-01-08 21:58:29 +03:00
|
|
|
/* used for LEA and MOV AX, mem */
/* Apply the DS segment base (or the active override) to the linear
   address already held in s->A0. */
static void gen_add_A0_ds_seg(DisasContext *s)
{
    gen_lea_v_seg(s, s->aflag, s->A0, R_DS, s->override);
}
|
|
|
|
|
2008-10-04 07:27:44 +04:00
|
|
|
/* generate modrm memory load or store of 'reg'. TMP0 is used if reg ==
|
2003-10-01 00:34:21 +04:00
|
|
|
OR_TMP0 */
|
2012-09-08 17:26:02 +04:00
|
|
|
static void gen_ldst_modrm(CPUX86State *env, DisasContext *s, int modrm,
|
2019-08-23 21:10:58 +03:00
|
|
|
MemOp ot, int reg, int is_store)
|
2003-10-01 00:34:21 +04:00
|
|
|
{
|
2013-11-02 22:55:59 +04:00
|
|
|
int mod, rm;
|
2003-10-01 00:34:21 +04:00
|
|
|
|
|
|
|
mod = (modrm >> 6) & 3;
|
2005-01-04 02:50:08 +03:00
|
|
|
rm = (modrm & 7) | REX_B(s);
|
2003-10-01 00:34:21 +04:00
|
|
|
if (mod == 3) {
|
|
|
|
if (is_store) {
|
|
|
|
if (reg != OR_TMP0)
|
2018-09-11 23:07:54 +03:00
|
|
|
gen_op_mov_v_reg(s, ot, s->T0, reg);
|
|
|
|
gen_op_mov_reg_v(s, ot, rm, s->T0);
|
2003-10-01 00:34:21 +04:00
|
|
|
} else {
|
2018-09-11 23:07:54 +03:00
|
|
|
gen_op_mov_v_reg(s, ot, s->T0, rm);
|
2003-10-01 00:34:21 +04:00
|
|
|
if (reg != OR_TMP0)
|
2018-09-11 23:07:54 +03:00
|
|
|
gen_op_mov_reg_v(s, ot, reg, s->T0);
|
2003-10-01 00:34:21 +04:00
|
|
|
}
|
|
|
|
} else {
|
2013-11-02 22:55:59 +04:00
|
|
|
gen_lea_modrm(env, s, modrm);
|
2003-10-01 00:34:21 +04:00
|
|
|
if (is_store) {
|
|
|
|
if (reg != OR_TMP0)
|
2018-09-11 23:07:54 +03:00
|
|
|
gen_op_mov_v_reg(s, ot, s->T0, reg);
|
2018-09-11 21:48:41 +03:00
|
|
|
gen_op_st_v(s, ot, s->T0, s->A0);
|
2003-10-01 00:34:21 +04:00
|
|
|
} else {
|
2018-09-11 21:48:41 +03:00
|
|
|
gen_op_ld_v(s, ot, s->T0, s->A0);
|
2003-10-01 00:34:21 +04:00
|
|
|
if (reg != OR_TMP0)
|
2018-09-11 23:07:54 +03:00
|
|
|
gen_op_mov_reg_v(s, ot, reg, s->T0);
|
2003-10-01 00:34:21 +04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-08-23 16:50:48 +03:00
|
|
|
/*
 * Fetch an immediate address of width OT from the instruction stream,
 * zero-extended into a target_ulong.
 */
static target_ulong insn_get_addr(CPUX86State *env, DisasContext *s, MemOp ot)
{
    switch (ot) {
    case MO_8:
        return x86_ldub_code(env, s);
    case MO_16:
        return x86_lduw_code(env, s);
    case MO_32:
        return x86_ldl_code(env, s);
#ifdef TARGET_X86_64
    case MO_64:
        return x86_ldq_code(env, s);
#endif
    default:
        g_assert_not_reached();
    }
}
|
|
|
|
|
2019-08-23 21:10:58 +03:00
|
|
|
/*
 * Fetch an unsigned immediate of width OT from the instruction stream.
 * Note that MO_64 deliberately reads only 32 bits: x86 immediates are
 * at most 32 bits wide here, sign-extension being the caller's job.
 */
static inline uint32_t insn_get(CPUX86State *env, DisasContext *s, MemOp ot)
{
    switch (ot) {
    case MO_8:
        return x86_ldub_code(env, s);
    case MO_16:
        return x86_lduw_code(env, s);
    case MO_32:
#ifdef TARGET_X86_64
    case MO_64:
#endif
        return x86_ldl_code(env, s);
    default:
        g_assert_not_reached();
    }
}
|
|
|
|
|
target/i386: add core of new i386 decoder
The new decoder is based on three principles:
- use mostly table-driven decoding, using tables derived as much as possible
from the Intel manual. Centralizing the decode the operands makes it
more homogeneous, for example all immediates are signed. All modrm
handling is in one function, and can be shared between SSE and ALU
instructions (including XMM<->GPR instructions). The SSE/AVX decoder
will also not have duplicated code between the 0F, 0F38 and 0F3A tables.
- keep the code as "non-branchy" as possible. Generally, the code for
the new decoder is more verbose, but the control flow is simpler.
Conditionals are not nested and have small bodies. All instruction
groups are resolved even before operands are decoded, and code
generation is separated as much as possible within small functions
that only handle one instruction each.
- keep address generation and (for ALU operands) memory loads and writeback
as much in common code as possible. All ALU operations for example
are implemented as T0=f(T0,T1). For non-ALU instructions,
read-modify-write memory operations are rare, but registers do not
have TCGv equivalents: therefore, the common logic sets up pointer
temporaries with the operands, while load and writeback are handled
by gvec or by helpers.
These principles make future code review and extensibility simpler, at
the cost of having a relatively large amount of code in the form of this
patch. Even EVEX should not be _too_ hard to implement (it's just a crazy
large amount of possibilities).
This patch introduces the main decoder flow, and integrates the old
decoder with the new one. The old decoder takes care of parsing
prefixes and then optionally drops to the new one. The changes to the
old decoder are minimal and allow it to be replaced incrementally with
the new one.
There is a debugging mechanism through a "LIMIT" environment variable.
In user-mode emulation, the variable is the number of instructions
decoded by the new decoder before permanently switching to the old one.
In system emulation, the variable is the highest opcode that is decoded
by the new decoder (this is less friendly, but it's the best that can
be done without requiring deterministic execution).
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
2022-08-23 12:20:55 +03:00
|
|
|
/*
 * Fetch an immediate of width OT from the instruction stream,
 * sign-extended into a target_long.
 */
static target_long insn_get_signed(CPUX86State *env, DisasContext *s, MemOp ot)
{
    switch (ot) {
    case MO_8:
        return (int8_t)x86_ldub_code(env, s);
    case MO_16:
        return (int16_t)x86_lduw_code(env, s);
    case MO_32:
        return (int32_t)x86_ldl_code(env, s);
#ifdef TARGET_X86_64
    case MO_64:
        return x86_ldq_code(env, s);
#endif
    default:
        g_assert_not_reached();
    }
}
|
|
|
|
|
target/i386: move C0-FF opcodes to new decoder (except for x87)
The shift instructions are rewritten instead of reusing code from the old
decoder. Rotates use CC_OP_ADCOX more extensively and generally rely
more on the optimizer, so that the code generators are shared between
the immediate-count and variable-count cases.
In particular, this makes gen_RCL and gen_RCR pretty efficient for the
count == 1 case, which becomes (apart from a few extra movs) something like:
(compute_cc_all if needed)
// save old value for OF calculation
mov cc_src2, T0
// the bulk of RCL is just this!
deposit T0, cc_src, T0, 1, TARGET_LONG_BITS - 1
// compute carry
shr cc_dst, cc_src2, length - 1
and cc_dst, cc_dst, 1
// compute overflow
xor cc_src2, cc_src2, T0
extract cc_src2, cc_src2, length - 1, 1
32-bit MUL and IMUL are also slightly more efficient on 64-bit hosts.
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
2023-10-21 18:36:34 +03:00
|
|
|
/*
 * Emit the two exits of a conditional jump.  The caller has already
 * branched to TAKEN when the condition holds, so the code emitted
 * first (optionally labelled NOT_TAKEN) falls through to the next
 * instruction, while TAKEN jumps by DIFF.
 */
static void gen_conditional_jump_labels(DisasContext *s, target_long diff,
                                        TCGLabel *not_taken, TCGLabel *taken)
{
    if (not_taken) {
        gen_set_label(not_taken);
    }
    /* Not-taken path: continue at the following instruction. */
    gen_jmp_rel_csize(s, 0, 1);

    gen_set_label(taken);
    gen_jmp_rel(s, s->dflag, diff, 0);
}
|
|
|
|
|
2022-10-01 17:09:28 +03:00
|
|
|
/* Emit a conditional jump on condition code B with displacement DIFF. */
static void gen_jcc(DisasContext *s, int b, int diff)
{
    TCGLabel *l1 = gen_new_label();

    /* Branch to l1 when condition B is true ... */
    gen_jcc1(s, b, l1);
    /* ... and emit the fall-through / taken exits. */
    gen_conditional_jump_labels(s, diff, NULL, l1);
}
|
|
|
|
|
2023-10-27 05:22:25 +03:00
|
|
|
/* Conditional move: DEST = SRC if condition B holds, else unchanged. */
static void gen_cmovcc1(DisasContext *s, int b, TCGv dest, TCGv src)
{
    CCPrepare cc = gen_prepare_cc(s, b, NULL);

    if (!cc.use_reg2) {
        /* Materialize the immediate comparison operand. */
        cc.reg2 = tcg_constant_tl(cc.imm);
    }

    tcg_gen_movcond_tl(cc.cond, dest, cc.reg, cc.reg2, src, dest);
}
|
|
|
|
|
2023-10-23 09:49:12 +03:00
|
|
|
/* Load a segment register in real/VM86 mode: store the 16-bit selector
   and set the segment base to selector << 4; no descriptor checks. */
static void gen_op_movl_seg_real(DisasContext *s, X86Seg seg_reg, TCGv seg)
{
    TCGv selector = tcg_temp_new();
    tcg_gen_ext16u_tl(selector, seg);
    tcg_gen_st32_tl(selector, tcg_env,
                    offsetof(CPUX86State,segs[seg_reg].selector));
    tcg_gen_shli_tl(cpu_seg_base[seg_reg], selector, 4);
}
|
|
|
|
|
2024-02-28 13:15:43 +03:00
|
|
|
/* move SRC to seg_reg and compute if the CPU state may change.  Never
   call this function with seg_reg == R_CS */
static void gen_movl_seg(DisasContext *s, X86Seg seg_reg, TCGv src)
{
    if (PE(s) && !VM86(s)) {
        /* Protected mode: descriptor load and checks in the helper. */
        tcg_gen_trunc_tl_i32(s->tmp2_i32, src);
        gen_helper_load_seg(tcg_env, tcg_constant_i32(seg_reg), s->tmp2_i32);
        /* abort translation because the addseg value may change or
           because ss32 may change. For R_SS, translation must always
           stop as a special handling must be done to disable hardware
           interrupts for the next instruction */
        if (seg_reg == R_SS) {
            s->base.is_jmp = DISAS_EOB_INHIBIT_IRQ;
        } else if (CODE32(s) && seg_reg < R_FS) {
            s->base.is_jmp = DISAS_EOB_NEXT;
        }
    } else {
        gen_op_movl_seg_real(s, seg_reg, src);
        if (seg_reg == R_SS) {
            /* Loading SS delays interrupts until after the next insn. */
            s->base.is_jmp = DISAS_EOB_INHIBIT_IRQ;
        }
    }
}
|
|
|
|
|
2023-10-23 09:49:12 +03:00
|
|
|
/* Far call: new CS selector in s->T1, new EIP in s->T0.  Ends the TB. */
static void gen_far_call(DisasContext *s)
{
    TCGv_i32 new_cs = tcg_temp_new_i32();
    tcg_gen_trunc_tl_i32(new_cs, s->T1);
    if (PE(s) && !VM86(s)) {
        /* Protected mode: gate/privilege checks done by the helper. */
        gen_helper_lcall_protected(tcg_env, new_cs, s->T0,
                                   tcg_constant_i32(s->dflag - 1),
                                   eip_next_tl(s));
    } else {
        TCGv_i32 new_eip = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(new_eip, s->T0);
        gen_helper_lcall_real(tcg_env, new_cs, new_eip,
                              tcg_constant_i32(s->dflag - 1),
                              eip_next_i32(s));
    }
    s->base.is_jmp = DISAS_JUMP;
}
|
|
|
|
|
|
|
|
/* Far jump: new CS selector in s->T1, new EIP in s->T0.  Ends the TB. */
static void gen_far_jmp(DisasContext *s)
{
    if (PE(s) && !VM86(s)) {
        /* Protected mode: descriptor checks done by the helper. */
        TCGv_i32 new_cs = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(new_cs, s->T1);
        gen_helper_ljmp_protected(tcg_env, new_cs, s->T0,
                                  eip_next_tl(s));
    } else {
        /* Real/VM86 mode: load CS directly and set EIP. */
        gen_op_movl_seg_real(s, R_CS, s->T1);
        gen_op_jmp_v(s, s->T0);
    }
    s->base.is_jmp = DISAS_JUMP;
}
|
|
|
|
|
2021-05-14 18:13:29 +03:00
|
|
|
/*
 * Emit an SVM intercept check of TYPE.  When not running as an SVM
 * guest this is a no-op, so the common case emits nothing.
 */
static void gen_svm_check_intercept(DisasContext *s, uint32_t type)
{
    if (unlikely(GUEST(s))) {
        gen_helper_svm_check_intercept(tcg_env, tcg_constant_i32(type));
    }
}
|
|
|
|
|
2004-01-04 20:35:00 +03:00
|
|
|
/* Adjust ESP by ADDEND, using the current stack address size. */
static inline void gen_stack_update(DisasContext *s, int addend)
{
    gen_op_add_reg_im(s, mo_stacksize(s), R_ESP, addend);
}
|
|
|
|
|
2013-11-06 07:19:04 +04:00
|
|
|
/* Generate a push. It depends on ss32, addseg and dflag.  */
static void gen_push_v(DisasContext *s, TCGv val)
{
    MemOp d_ot = mo_pushpop(s, s->dflag);
    MemOp a_ot = mo_stacksize(s);
    int size = 1 << d_ot;
    /* new_esp normally aliases s->A0, which holds the decremented ESP. */
    TCGv new_esp = s->A0;

    tcg_gen_subi_tl(s->A0, cpu_regs[R_ESP], size);

    if (!CODE64(s)) {
        if (ADDSEG(s)) {
            /* gen_lea_v_seg below will add the SS base into s->A0, so
               preserve the raw new ESP value in a separate temp. */
            new_esp = tcg_temp_new();
            tcg_gen_mov_tl(new_esp, s->A0);
        }
        gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1);
    }

    /* Store first, update ESP after: a faulting store must not have
       modified ESP. */
    gen_op_st_v(s, d_ot, val, s->A0);
    gen_op_mov_reg_v(s, a_ot, R_ESP, new_esp);
}
|
|
|
|
|
2004-01-04 20:35:00 +03:00
|
|
|
/* two step pop is necessary for precise exceptions */
/* Load the top of stack into s->T0 WITHOUT adjusting ESP; the caller
   invokes gen_pop_update() afterwards, so a faulting load leaves ESP
   unchanged.  Returns the operand size used. */
static MemOp gen_pop_T0(DisasContext *s)
{
    MemOp d_ot = mo_pushpop(s, s->dflag);

    gen_lea_v_seg_dest(s, mo_stacksize(s), s->T0, cpu_regs[R_ESP], R_SS, -1);
    gen_op_ld_v(s, d_ot, s->T0, s->T0);

    return d_ot;
}
|
|
|
|
|
2019-08-23 21:10:58 +03:00
|
|
|
/* Second step of a pop: bump ESP past the value read by gen_pop_T0(). */
static inline void gen_pop_update(DisasContext *s, MemOp ot)
{
    gen_stack_update(s, 1 << ot);
}
|
|
|
|
|
2015-12-17 22:19:20 +03:00
|
|
|
/* Compute the segmented address of the stack top (SS:ESP) into s->A0. */
static inline void gen_stack_A0(DisasContext *s)
{
    gen_lea_v_seg(s, SS32(s) ? MO_32 : MO_16, cpu_regs[R_ESP], R_SS, -1);
}
|
|
|
|
|
|
|
|
/*
 * PUSHA: store all eight general registers below the current ESP,
 * EAX at the highest address (pushed first), EDI at the lowest, then
 * drop ESP by the whole frame in one go.
 */
static void gen_pusha(DisasContext *s)
{
    MemOp s_ot = SS32(s) ? MO_32 : MO_16;
    MemOp d_ot = s->dflag;
    int size = 1 << d_ot;
    int reg;

    for (reg = 7; reg >= 0; reg--) {
        tcg_gen_addi_tl(s->A0, cpu_regs[R_ESP], -(reg + 1) * size);
        gen_lea_v_seg(s, s_ot, s->A0, R_SS, -1);
        gen_op_st_v(s, d_ot, cpu_regs[reg], s->A0);
    }

    gen_stack_update(s, -8 * size);
}
|
|
|
|
|
2003-10-01 00:34:21 +04:00
|
|
|
/*
 * POPA: reload the general registers from the stack in reverse push
 * order (EDI first, from the lowest address), then raise ESP past the
 * whole frame.  The stored ESP image is skipped, not reloaded.
 */
static void gen_popa(DisasContext *s)
{
    MemOp s_ot = SS32(s) ? MO_32 : MO_16;
    MemOp d_ot = s->dflag;
    int size = 1 << d_ot;
    int reg;

    for (reg = 7; reg >= 0; reg--) {
        /* ESP is not reloaded */
        if (reg == R_ESP) {
            continue;
        }
        tcg_gen_addi_tl(s->A0, cpu_regs[R_ESP], (7 - reg) * size);
        gen_lea_v_seg(s, s_ot, s->A0, R_SS, -1);
        gen_op_ld_v(s, d_ot, s->T0, s->A0);
        gen_op_mov_reg_v(s, d_ot, reg, s->T0);
    }

    gen_stack_update(s, 8 * size);
}
|
|
|
|
|
|
|
|
/*
 * ENTER: allocate a stack frame of ESP_ADDEND bytes with LEVEL nesting
 * levels (taken modulo 32, per the architecture).  s->T1 holds the
 * FrameTemp (new EBP candidate) throughout; s->A0 and s->tmp0 are
 * scratch.
 */
static void gen_enter(DisasContext *s, int esp_addend, int level)
{
    MemOp d_ot = mo_pushpop(s, s->dflag);
    MemOp a_ot = CODE64(s) ? MO_64 : SS32(s) ? MO_32 : MO_16;
    int size = 1 << d_ot;

    /* Push BP; compute FrameTemp into T1.  */
    tcg_gen_subi_tl(s->T1, cpu_regs[R_ESP], size);
    gen_lea_v_seg(s, a_ot, s->T1, R_SS, -1);
    gen_op_st_v(s, d_ot, cpu_regs[R_EBP], s->A0);

    level &= 31;
    if (level != 0) {
        int i;

        /* Copy level-1 pointers from the previous frame.  */
        for (i = 1; i < level; ++i) {
            tcg_gen_subi_tl(s->A0, cpu_regs[R_EBP], size * i);
            gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1);
            gen_op_ld_v(s, d_ot, s->tmp0, s->A0);

            tcg_gen_subi_tl(s->A0, s->T1, size * i);
            gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1);
            gen_op_st_v(s, d_ot, s->tmp0, s->A0);
        }

        /* Push the current FrameTemp as the last level.  */
        tcg_gen_subi_tl(s->A0, s->T1, size * level);
        gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1);
        gen_op_st_v(s, d_ot, s->T1, s->A0);
    }

    /* Copy the FrameTemp value to EBP.  */
    gen_op_mov_reg_v(s, a_ot, R_EBP, s->T1);

    /* Compute the final value of ESP.  */
    tcg_gen_subi_tl(s->T1, s->T1, esp_addend + size * level);
    gen_op_mov_reg_v(s, a_ot, R_ESP, s->T1);
}
|
|
|
|
|
2015-12-17 22:19:24 +03:00
|
|
|
/*
 * LEAVE: reload EBP from the frame pointer slot and restore ESP to
 * just above it (EBP + operand size).
 */
static void gen_leave(DisasContext *s)
{
    MemOp d_ot = mo_pushpop(s, s->dflag);
    MemOp a_ot = mo_stacksize(s);

    /* Load the saved EBP from SS:EBP into T0. */
    gen_lea_v_seg(s, a_ot, cpu_regs[R_EBP], R_SS, -1);
    gen_op_ld_v(s, d_ot, s->T0, s->A0);

    tcg_gen_addi_tl(s->T1, cpu_regs[R_EBP], 1 << d_ot);

    /* Write both registers only after the load cannot fault anymore. */
    gen_op_mov_reg_v(s, d_ot, R_EBP, s->T0);
    gen_op_mov_reg_v(s, a_ot, R_ESP, s->T1);
}
|
|
|
|
|
2016-03-02 03:53:18 +03:00
|
|
|
/* Similarly, except that the assumption here is that we don't decode
   the instruction at all -- either a missing opcode, an unimplemented
   feature, or just a bogus instruction stream.  */
static void gen_unknown_opcode(CPUX86State *env, DisasContext *s)
{
    gen_illegal_opcode(s);

    if (qemu_loglevel_mask(LOG_UNIMP)) {
        FILE *logfile = qemu_log_trylock();
        if (logfile) {
            /* Dump the raw bytes from insn start to current decode pos. */
            target_ulong pc = s->base.pc_next, end = s->pc;

            fprintf(logfile, "ILLOPC: " TARGET_FMT_lx ":", pc);
            for (; pc < end; ++pc) {
                fprintf(logfile, " %02x", translator_ldub(env, &s->base, pc));
            }
            fprintf(logfile, "\n");
            qemu_log_unlock(logfile);
        }
    }
}
|
|
|
|
|
2003-10-01 00:34:21 +04:00
|
|
|
/* an interrupt is different from an exception because of the
   privilege checks */
static void gen_interrupt(DisasContext *s, uint8_t intno)
{
    /* Sync cc_op and EIP so the helper sees precise state. */
    gen_update_cc_op(s);
    gen_update_eip_cur(s);
    gen_helper_raise_interrupt(tcg_env, tcg_constant_i32(intno),
                               cur_insn_len_i32(s));
    /* The helper longjmps out; nothing after this runs. */
    s->base.is_jmp = DISAS_NORETURN;
}
|
|
|
|
|
2015-07-06 19:29:59 +03:00
|
|
|
/*
 * Set MASK in env->hflags at run time, mirroring the change in the
 * translation-time copy s->flags.  Emits nothing when the bits are
 * already known to be set.
 */
static void gen_set_hflag(DisasContext *s, uint32_t mask)
{
    TCGv_i32 tmp;

    if (s->flags & mask) {
        return;
    }
    tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, tcg_env, offsetof(CPUX86State, hflags));
    tcg_gen_ori_i32(tmp, tmp, mask);
    tcg_gen_st_i32(tmp, tcg_env, offsetof(CPUX86State, hflags));
    s->flags |= mask;
}
|
|
|
|
|
|
|
|
/*
 * Clear MASK in env->hflags at run time, mirroring the change in the
 * translation-time copy s->flags.  Emits nothing when the bits are
 * already known to be clear.
 */
static void gen_reset_hflag(DisasContext *s, uint32_t mask)
{
    TCGv_i32 tmp;

    if (!(s->flags & mask)) {
        return;
    }
    tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, tcg_env, offsetof(CPUX86State, hflags));
    tcg_gen_andi_i32(tmp, tmp, ~mask);
    tcg_gen_st_i32(tmp, tcg_env, offsetof(CPUX86State, hflags));
    s->flags &= ~mask;
}
|
|
|
|
|
2022-10-24 09:16:30 +03:00
|
|
|
/* Set MASK in env->eflags with a read-modify-write at run time. */
static void gen_set_eflags(DisasContext *s, target_ulong mask)
{
    TCGv flags = tcg_temp_new();

    tcg_gen_ld_tl(flags, tcg_env, offsetof(CPUX86State, eflags));
    tcg_gen_ori_tl(flags, flags, mask);
    tcg_gen_st_tl(flags, tcg_env, offsetof(CPUX86State, eflags));
}
|
|
|
|
|
|
|
|
/* Clear MASK in env->eflags with a read-modify-write at run time. */
static void gen_reset_eflags(DisasContext *s, target_ulong mask)
{
    TCGv flags = tcg_temp_new();

    tcg_gen_ld_tl(flags, tcg_env, offsetof(CPUX86State, eflags));
    tcg_gen_andi_tl(flags, flags, ~mask);
    tcg_gen_st_tl(flags, tcg_env, offsetof(CPUX86State, eflags));
}
|
|
|
|
|
2015-07-07 16:38:58 +03:00
|
|
|
/* Clear BND registers during legacy branches.  */
static void gen_bnd_jmp(DisasContext *s)
{
    /* Clear the registers only if BND prefix is missing, MPX is enabled,
       and if the BNDREGs are known to be in use (non-zero) already.
       The helper itself will check BNDPRESERVE at runtime.  */
    if ((s->prefix & PREFIX_REPNZ) == 0
        && (s->flags & HF_MPX_EN_MASK) != 0
        && (s->flags & HF_MPX_IU_MASK) != 0) {
        gen_helper_bnd_jmp(tcg_env);
    }
}
|
|
|
|
|
2016-03-03 08:16:51 +03:00
|
|
|
/* Generate an end of block. Trace exception is also generated if needed.
   If INHIBIT, set HF_INHIBIT_IRQ_MASK if it isn't already set.
   If RECHECK_TF, emit a rechecking helper for #DB, ignoring the state of
   S->TF. This is used by the syscall/sysret insns.
   If JR, the jump target is already in cpu_eip; the TB may chain through
   lookup_and_goto_ptr unless single-stepping or an interrupt-inhibit
   reset forces a plain exit to the main loop. */
static void
gen_eob_worker(DisasContext *s, bool inhibit, bool recheck_tf, bool jr)
{
    bool inhibit_reset;

    /* Flush any lazily-tracked condition-code state before leaving the TB. */
    gen_update_cc_op(s);

    /* If several instructions disable interrupts, only the first does it. */
    inhibit_reset = false;
    if (s->flags & HF_INHIBIT_IRQ_MASK) {
        /* The inhibit window set by the previous insn ends here; clear it
           so the next TB can take interrupts again. */
        gen_reset_hflag(s, HF_INHIBIT_IRQ_MASK);
        inhibit_reset = true;
    } else if (inhibit) {
        gen_set_hflag(s, HF_INHIBIT_IRQ_MASK);
    }

    /* Clear RF if it was set when this TB started executing. */
    if (s->base.tb->flags & HF_RF_MASK) {
        gen_reset_eflags(s, RF_MASK);
    }
    if (recheck_tf) {
        /* Re-evaluate #DB conditions at runtime (syscall/sysret path). */
        gen_helper_rechecking_single_step(tcg_env);
        tcg_gen_exit_tb(NULL, 0);
    } else if ((s->flags & HF_TF_MASK) && !inhibit) {
        /* TF single-step: raise #DB after this instruction. */
        gen_helper_single_step(tcg_env);
    } else if (jr &&
               /* give irqs a chance to happen */
               !inhibit_reset) {
        tcg_gen_lookup_and_goto_ptr();
    } else {
        tcg_gen_exit_tb(NULL, 0);
    }
    s->base.is_jmp = DISAS_NORETURN;
}
|
|
|
|
|
2017-04-27 06:29:21 +03:00
|
|
|
static inline void
|
2024-04-10 15:14:10 +03:00
|
|
|
gen_eob_syscall(DisasContext *s)
|
2017-04-27 06:29:21 +03:00
|
|
|
{
|
2024-04-10 15:14:10 +03:00
|
|
|
gen_eob_worker(s, false, true, false);
|
2017-04-27 06:29:21 +03:00
|
|
|
}
|
|
|
|
|
2024-04-10 15:14:10 +03:00
|
|
|
/* End of block. Set HF_INHIBIT_IRQ_MASK if it isn't already set. */
|
|
|
|
static void gen_eob_inhibit_irq(DisasContext *s)
|
2016-12-07 02:06:30 +03:00
|
|
|
{
|
2024-04-10 15:14:10 +03:00
|
|
|
gen_eob_worker(s, true, false, false);
|
2016-12-07 02:06:30 +03:00
|
|
|
}
|
|
|
|
|
2016-03-03 08:16:51 +03:00
|
|
|
/* End of block, resetting the inhibit irq flag. */
|
|
|
|
static void gen_eob(DisasContext *s)
|
|
|
|
{
|
2024-04-10 15:14:10 +03:00
|
|
|
gen_eob_worker(s, false, false, false);
|
2016-03-03 08:16:51 +03:00
|
|
|
}
|
|
|
|
|
2017-04-27 06:29:21 +03:00
|
|
|
/* Jump to register */
|
2022-10-01 17:09:22 +03:00
|
|
|
static void gen_jr(DisasContext *s)
|
2017-04-27 06:29:21 +03:00
|
|
|
{
|
2024-04-10 15:14:10 +03:00
|
|
|
gen_eob_worker(s, false, false, true);
|
2017-04-27 06:29:21 +03:00
|
|
|
}
|
|
|
|
|
2022-10-01 17:09:27 +03:00
|
|
|
/* Jump to eip+diff, truncating the result to OT. */
static void gen_jmp_rel(DisasContext *s, MemOp ot, int diff, int tb_num)
{
    bool use_goto_tb = s->jmp_opt;
    target_ulong mask = -1;
    target_ulong new_pc = s->pc + diff;
    target_ulong new_eip = new_pc - s->cs_base;

    /* The caller must have flushed cc_op before changing control flow. */
    assert(!s->cc_op_dirty);

    /* In 64-bit mode, operand size is fixed at 64 bits. */
    if (!CODE64(s)) {
        if (ot == MO_16) {
            mask = 0xffff;
            if (tb_cflags(s->base.tb) & CF_PCREL && CODE32(s)) {
                /* With PC-relative code, the 16-bit masking below would
                   invalidate the same-page proof; force an indirect exit. */
                use_goto_tb = false;
            }
        } else {
            mask = 0xffffffff;
        }
    }
    new_eip &= mask;

    if (tb_cflags(s->base.tb) & CF_PCREL) {
        /* Update cpu_eip relative to the last value written (pc_save). */
        tcg_gen_addi_tl(cpu_eip, cpu_eip, new_pc - s->pc_save);
        /*
         * If we can prove the branch does not leave the page and we have
         * no extra masking to apply (data16 branch in code32, see above),
         * then we have also proven that the addition does not wrap.
         */
        if (!use_goto_tb || !is_same_page(&s->base, new_pc)) {
            tcg_gen_andi_tl(cpu_eip, cpu_eip, mask);
            use_goto_tb = false;
        }
    } else if (!CODE64(s)) {
        /* Recompute the linear address from the masked EIP. */
        new_pc = (uint32_t)(new_eip + s->cs_base);
    }

    if (use_goto_tb && translator_use_goto_tb(&s->base, new_pc)) {
        /* jump to same page: we can use a direct jump */
        tcg_gen_goto_tb(tb_num);
        if (!(tb_cflags(s->base.tb) & CF_PCREL)) {
            tcg_gen_movi_tl(cpu_eip, new_eip);
        }
        tcg_gen_exit_tb(s->base.tb, tb_num);
        s->base.is_jmp = DISAS_NORETURN;
    } else {
        if (!(tb_cflags(s->base.tb) & CF_PCREL)) {
            tcg_gen_movi_tl(cpu_eip, new_eip);
        }
        if (s->jmp_opt) {
            gen_jr(s); /* jump to another page */
        } else {
            gen_eob(s); /* exit to main loop */
        }
    }
}
|
|
|
|
|
2022-10-01 17:09:27 +03:00
|
|
|
/* Jump to eip+diff, truncating to the current code size. */
|
|
|
|
static void gen_jmp_rel_csize(DisasContext *s, int diff, int tb_num)
|
2005-01-04 02:50:08 +03:00
|
|
|
{
|
2022-10-01 17:09:27 +03:00
|
|
|
/* CODE64 ignores the OT argument, so we need not consider it. */
|
|
|
|
gen_jmp_rel(s, CODE32(s) ? MO_32 : MO_16, diff, tb_num);
|
2005-01-04 02:50:08 +03:00
|
|
|
}
|
|
|
|
|
2013-10-31 09:04:05 +04:00
|
|
|
/* Load a 64-bit little-endian value from guest memory at [A0] into the
   env field at OFFSET, going through the shared s->tmp1_i64 scratch. */
static inline void gen_ldq_env_A0(DisasContext *s, int offset)
{
    tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ);
    tcg_gen_st_i64(s->tmp1_i64, tcg_env, offset);
}
|
2005-01-08 21:58:29 +03:00
|
|
|
|
2013-10-31 09:04:05 +04:00
|
|
|
/* Store the 64-bit value from the env field at OFFSET to guest memory at
   [A0] in little-endian order, going through the shared s->tmp1_i64. */
static inline void gen_stq_env_A0(DisasContext *s, int offset)
{
    tcg_gen_ld_i64(s->tmp1_i64, tcg_env, offset);
    tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ);
}
|
2005-01-08 21:58:29 +03:00
|
|
|
|
2022-09-18 01:27:12 +03:00
|
|
|
/* Load a 128-bit little-endian value from [A0] into env at OFFSET.
   ALIGN requests a 16-byte alignment check on the guest access. */
static inline void gen_ldo_env_A0(DisasContext *s, int offset, bool align)
{
    MemOp mop = MO_128 | MO_LE;
    TCGv_i128 tmp;

    /* Select the atomicity requirement based on AVX support. */
    mop |= (s->cpuid_ext_features & CPUID_EXT_AVX
            ? MO_ATOM_IFALIGN : MO_ATOM_IFALIGN_PAIR);
    if (align) {
        mop |= MO_ALIGN_16;
    }

    tmp = tcg_temp_new_i128();
    tcg_gen_qemu_ld_i128(tmp, s->A0, s->mem_index, mop);
    tcg_gen_st_i128(tmp, tcg_env, offset);
}
|
2005-01-04 02:50:08 +03:00
|
|
|
|
2022-09-18 01:27:12 +03:00
|
|
|
/* Store the 128-bit value at env OFFSET to [A0] in little-endian order.
   ALIGN requests a 16-byte alignment check on the guest access. */
static inline void gen_sto_env_A0(DisasContext *s, int offset, bool align)
{
    MemOp mop = MO_128 | MO_LE;
    TCGv_i128 tmp;

    /* Select the atomicity requirement based on AVX support. */
    mop |= (s->cpuid_ext_features & CPUID_EXT_AVX
            ? MO_ATOM_IFALIGN : MO_ATOM_IFALIGN_PAIR);
    if (align) {
        mop |= MO_ALIGN_16;
    }

    tmp = tcg_temp_new_i128();
    tcg_gen_ld_i128(tmp, tcg_env, offset);
    tcg_gen_qemu_st_i128(tmp, s->A0, s->mem_index, mop);
}
|
2005-01-04 02:50:08 +03:00
|
|
|
|
2022-08-23 15:55:56 +03:00
|
|
|
/* Load a ymm-sized (256-bit) value from [A0] into env at OFFSET, as two
   128-bit halves.  ALIGN requests a 32-byte alignment check, applied to
   the low-half access only. */
static void gen_ldy_env_A0(DisasContext *s, int offset, bool align)
{
    MemOp mop = MO_128 | MO_LE | MO_ATOM_IFALIGN_PAIR;
    MemOp mop_lo = mop | (align ? MO_ALIGN_32 : 0);
    TCGv_i128 lo = tcg_temp_new_i128();
    TCGv_i128 hi = tcg_temp_new_i128();

    /* Low half at [A0] (carries the alignment check), high at [A0 + 16]. */
    tcg_gen_qemu_ld_i128(lo, s->A0, s->mem_index, mop_lo);
    tcg_gen_addi_tl(s->tmp0, s->A0, 16);
    tcg_gen_qemu_ld_i128(hi, s->tmp0, s->mem_index, mop);

    tcg_gen_st_i128(lo, tcg_env, offset + offsetof(YMMReg, YMM_X(0)));
    tcg_gen_st_i128(hi, tcg_env, offset + offsetof(YMMReg, YMM_X(1)));
}
|
|
|
|
|
2022-09-20 12:42:45 +03:00
|
|
|
/* Store a ymm-sized (256-bit) value from env at OFFSET to [A0], as two
   128-bit halves.  ALIGN requests a 32-byte alignment check, applied to
   the low-half access only. */
static void gen_sty_env_A0(DisasContext *s, int offset, bool align)
{
    MemOp mop = MO_128 | MO_LE | MO_ATOM_IFALIGN_PAIR;
    MemOp mop_lo = mop | (align ? MO_ALIGN_32 : 0);
    TCGv_i128 tmp = tcg_temp_new_i128();

    /* Low half: X(0) at [A0]; this access carries the alignment check. */
    tcg_gen_ld_i128(tmp, tcg_env, offset + offsetof(YMMReg, YMM_X(0)));
    tcg_gen_qemu_st_i128(tmp, s->A0, s->mem_index, mop_lo);

    /* High half: X(1) at [A0 + 16]. */
    tcg_gen_addi_tl(s->tmp0, s->A0, 16);
    tcg_gen_ld_i128(tmp, tcg_env, offset + offsetof(YMMReg, YMM_X(1)));
    tcg_gen_qemu_st_i128(tmp, s->tmp0, s->mem_index, mop);
}
|
|
|
|
|
2022-11-09 07:22:15 +03:00
|
|
|
/* Generate CMPXCHG8B: compare EDX:EAX against the 64-bit memory operand,
   store ECX:EBX on match, and update ZF accordingly. */
static void gen_cmpxchg8b(DisasContext *s, CPUX86State *env, int modrm)
{
    TCGv_i64 cmp, val, old;
    TCGv Z;

    gen_lea_modrm(env, s, modrm);

    cmp = tcg_temp_new_i64();
    val = tcg_temp_new_i64();
    old = tcg_temp_new_i64();

    /* Construct the comparison values from the register pair. */
    tcg_gen_concat_tl_i64(cmp, cpu_regs[R_EAX], cpu_regs[R_EDX]);
    tcg_gen_concat_tl_i64(val, cpu_regs[R_EBX], cpu_regs[R_ECX]);

    /* Only require atomic with LOCK; non-parallel handled in generator. */
    if (s->prefix & PREFIX_LOCK) {
        tcg_gen_atomic_cmpxchg_i64(old, s->A0, cmp, val, s->mem_index, MO_TEUQ);
    } else {
        tcg_gen_nonatomic_cmpxchg_i64(old, s->A0, cmp, val,
                                      s->mem_index, MO_TEUQ);
    }

    /* Compute the required value of Z (1 iff the exchange succeeded). */
    tcg_gen_setcond_i64(TCG_COND_EQ, cmp, old, cmp);
    Z = tcg_temp_new();
    tcg_gen_trunc_i64_tl(Z, cmp);

    /*
     * Extract the result values for the register pair.
     * For 32-bit, we may do this unconditionally, because on success (Z=1),
     * the old value matches the previous value in EDX:EAX.  For x86_64,
     * the store must be conditional, because we must leave the source
     * registers unchanged on success, and zero-extend the writeback
     * on failure (Z=0).
     */
    if (TARGET_LONG_BITS == 32) {
        tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], old);
    } else {
        TCGv zero = tcg_constant_tl(0);

        tcg_gen_extr_i64_tl(s->T0, s->T1, old);
        tcg_gen_movcond_tl(TCG_COND_EQ, cpu_regs[R_EAX], Z, zero,
                           s->T0, cpu_regs[R_EAX]);
        tcg_gen_movcond_tl(TCG_COND_EQ, cpu_regs[R_EDX], Z, zero,
                           s->T1, cpu_regs[R_EDX]);
    }

    /* Update Z. */
    gen_compute_eflags(s);
    tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, Z, ctz32(CC_Z), 1);
}
|
|
|
|
|
|
|
|
#ifdef TARGET_X86_64
|
|
|
|
/* Generate CMPXCHG16B: compare RDX:RAX against the aligned 128-bit memory
   operand, store RCX:RBX on match, and update ZF accordingly. */
static void gen_cmpxchg16b(DisasContext *s, CPUX86State *env, int modrm)
{
    /* A 16-byte alignment check is always required for this insn. */
    MemOp mop = MO_TE | MO_128 | MO_ALIGN;
    TCGv_i64 t0, t1;
    TCGv_i128 cmp, val;

    gen_lea_modrm(env, s, modrm);

    cmp = tcg_temp_new_i128();
    val = tcg_temp_new_i128();
    tcg_gen_concat_i64_i128(cmp, cpu_regs[R_EAX], cpu_regs[R_EDX]);
    tcg_gen_concat_i64_i128(val, cpu_regs[R_EBX], cpu_regs[R_ECX]);

    /* Only require atomic with LOCK; non-parallel handled in generator. */
    if (s->prefix & PREFIX_LOCK) {
        tcg_gen_atomic_cmpxchg_i128(val, s->A0, cmp, val, s->mem_index, mop);
    } else {
        tcg_gen_nonatomic_cmpxchg_i128(val, s->A0, cmp, val, s->mem_index, mop);
    }

    /* val now holds the old memory value; split it into two halves. */
    tcg_gen_extr_i128_i64(s->T0, s->T1, val);

    /* Determine success after the fact. */
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    tcg_gen_xor_i64(t0, s->T0, cpu_regs[R_EAX]);
    tcg_gen_xor_i64(t1, s->T1, cpu_regs[R_EDX]);
    tcg_gen_or_i64(t0, t0, t1);

    /* Update Z. */
    gen_compute_eflags(s);
    tcg_gen_setcondi_i64(TCG_COND_EQ, t0, t0, 0);
    tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, t0, ctz32(CC_Z), 1);

    /*
     * Extract the result values for the register pair.  We may do this
     * unconditionally, because on success (Z=1), the old value matches
     * the previous value in RDX:RAX.
     */
    tcg_gen_mov_i64(cpu_regs[R_EAX], s->T0);
    tcg_gen_mov_i64(cpu_regs[R_EDX], s->T1);
}
|
|
|
|
#endif
|
|
|
|
|
2023-10-13 09:42:58 +03:00
|
|
|
static bool disas_insn_x87(DisasContext *s, CPUState *cpu, int b)
|
|
|
|
{
|
|
|
|
CPUX86State *env = cpu_env(cpu);
|
|
|
|
bool update_fip = true;
|
|
|
|
int modrm, mod, rm, op;
|
|
|
|
|
|
|
|
if (s->flags & (HF_EM_MASK | HF_TS_MASK)) {
|
|
|
|
/* if CR0.EM or CR0.TS are set, generate an FPU exception */
|
|
|
|
/* XXX: what to do if illegal op ? */
|
|
|
|
gen_exception(s, EXCP07_PREX);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
modrm = x86_ldub_code(env, s);
|
|
|
|
mod = (modrm >> 6) & 3;
|
|
|
|
rm = modrm & 7;
|
|
|
|
op = ((b & 7) << 3) | ((modrm >> 3) & 7);
|
|
|
|
if (mod != 3) {
|
|
|
|
/* memory op */
|
|
|
|
AddressParts a = gen_lea_modrm_0(env, s, modrm);
|
|
|
|
TCGv ea = gen_lea_modrm_1(s, a, false);
|
|
|
|
TCGv last_addr = tcg_temp_new();
|
|
|
|
bool update_fdp = true;
|
|
|
|
|
|
|
|
tcg_gen_mov_tl(last_addr, ea);
|
|
|
|
gen_lea_v_seg(s, s->aflag, ea, a.def_seg, s->override);
|
|
|
|
|
|
|
|
switch (op) {
|
|
|
|
case 0x00 ... 0x07: /* fxxxs */
|
|
|
|
case 0x10 ... 0x17: /* fixxxl */
|
|
|
|
case 0x20 ... 0x27: /* fxxxl */
|
|
|
|
case 0x30 ... 0x37: /* fixxx */
|
|
|
|
{
|
|
|
|
int op1;
|
|
|
|
op1 = op & 7;
|
|
|
|
|
|
|
|
switch (op >> 4) {
|
|
|
|
case 0:
|
|
|
|
tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
|
|
|
|
s->mem_index, MO_LEUL);
|
|
|
|
gen_helper_flds_FT0(tcg_env, s->tmp2_i32);
|
|
|
|
break;
|
|
|
|
case 1:
|
|
|
|
tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
|
|
|
|
s->mem_index, MO_LEUL);
|
|
|
|
gen_helper_fildl_FT0(tcg_env, s->tmp2_i32);
|
|
|
|
break;
|
|
|
|
case 2:
|
|
|
|
tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
|
|
|
|
s->mem_index, MO_LEUQ);
|
|
|
|
gen_helper_fldl_FT0(tcg_env, s->tmp1_i64);
|
|
|
|
break;
|
|
|
|
case 3:
|
|
|
|
default:
|
|
|
|
tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
|
|
|
|
s->mem_index, MO_LESW);
|
|
|
|
gen_helper_fildl_FT0(tcg_env, s->tmp2_i32);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
gen_helper_fp_arith_ST0_FT0(op1);
|
|
|
|
if (op1 == 3) {
|
|
|
|
/* fcomp needs pop */
|
|
|
|
gen_helper_fpop(tcg_env);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case 0x08: /* flds */
|
|
|
|
case 0x0a: /* fsts */
|
|
|
|
case 0x0b: /* fstps */
|
|
|
|
case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
|
|
|
|
case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
|
|
|
|
case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
|
|
|
|
switch (op & 7) {
|
|
|
|
case 0:
|
|
|
|
switch (op >> 4) {
|
|
|
|
case 0:
|
|
|
|
tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
|
|
|
|
s->mem_index, MO_LEUL);
|
|
|
|
gen_helper_flds_ST0(tcg_env, s->tmp2_i32);
|
|
|
|
break;
|
|
|
|
case 1:
|
|
|
|
tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
|
|
|
|
s->mem_index, MO_LEUL);
|
|
|
|
gen_helper_fildl_ST0(tcg_env, s->tmp2_i32);
|
|
|
|
break;
|
|
|
|
case 2:
|
|
|
|
tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
|
|
|
|
s->mem_index, MO_LEUQ);
|
|
|
|
gen_helper_fldl_ST0(tcg_env, s->tmp1_i64);
|
|
|
|
break;
|
|
|
|
case 3:
|
|
|
|
default:
|
|
|
|
tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
|
|
|
|
s->mem_index, MO_LESW);
|
|
|
|
gen_helper_fildl_ST0(tcg_env, s->tmp2_i32);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case 1:
|
|
|
|
/* XXX: the corresponding CPUID bit must be tested ! */
|
|
|
|
switch (op >> 4) {
|
|
|
|
case 1:
|
|
|
|
gen_helper_fisttl_ST0(s->tmp2_i32, tcg_env);
|
|
|
|
tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
|
|
|
|
s->mem_index, MO_LEUL);
|
|
|
|
break;
|
|
|
|
case 2:
|
|
|
|
gen_helper_fisttll_ST0(s->tmp1_i64, tcg_env);
|
|
|
|
tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
|
|
|
|
s->mem_index, MO_LEUQ);
|
|
|
|
break;
|
|
|
|
case 3:
|
|
|
|
default:
|
|
|
|
gen_helper_fistt_ST0(s->tmp2_i32, tcg_env);
|
|
|
|
tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
|
|
|
|
s->mem_index, MO_LEUW);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
gen_helper_fpop(tcg_env);
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
switch (op >> 4) {
|
|
|
|
case 0:
|
|
|
|
gen_helper_fsts_ST0(s->tmp2_i32, tcg_env);
|
|
|
|
tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
|
|
|
|
s->mem_index, MO_LEUL);
|
|
|
|
break;
|
|
|
|
case 1:
|
|
|
|
gen_helper_fistl_ST0(s->tmp2_i32, tcg_env);
|
|
|
|
tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
|
|
|
|
s->mem_index, MO_LEUL);
|
|
|
|
break;
|
|
|
|
case 2:
|
|
|
|
gen_helper_fstl_ST0(s->tmp1_i64, tcg_env);
|
|
|
|
tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
|
|
|
|
s->mem_index, MO_LEUQ);
|
|
|
|
break;
|
|
|
|
case 3:
|
|
|
|
default:
|
|
|
|
gen_helper_fist_ST0(s->tmp2_i32, tcg_env);
|
|
|
|
tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
|
|
|
|
s->mem_index, MO_LEUW);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
if ((op & 7) == 3) {
|
|
|
|
gen_helper_fpop(tcg_env);
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case 0x0c: /* fldenv mem */
|
|
|
|
gen_helper_fldenv(tcg_env, s->A0,
|
|
|
|
tcg_constant_i32(s->dflag - 1));
|
|
|
|
update_fip = update_fdp = false;
|
|
|
|
break;
|
|
|
|
case 0x0d: /* fldcw mem */
|
|
|
|
tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
|
|
|
|
s->mem_index, MO_LEUW);
|
|
|
|
gen_helper_fldcw(tcg_env, s->tmp2_i32);
|
|
|
|
update_fip = update_fdp = false;
|
|
|
|
break;
|
|
|
|
case 0x0e: /* fnstenv mem */
|
|
|
|
gen_helper_fstenv(tcg_env, s->A0,
|
|
|
|
tcg_constant_i32(s->dflag - 1));
|
|
|
|
update_fip = update_fdp = false;
|
|
|
|
break;
|
|
|
|
case 0x0f: /* fnstcw mem */
|
|
|
|
gen_helper_fnstcw(s->tmp2_i32, tcg_env);
|
|
|
|
tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
|
|
|
|
s->mem_index, MO_LEUW);
|
|
|
|
update_fip = update_fdp = false;
|
|
|
|
break;
|
|
|
|
case 0x1d: /* fldt mem */
|
|
|
|
gen_helper_fldt_ST0(tcg_env, s->A0);
|
|
|
|
break;
|
|
|
|
case 0x1f: /* fstpt mem */
|
|
|
|
gen_helper_fstt_ST0(tcg_env, s->A0);
|
|
|
|
gen_helper_fpop(tcg_env);
|
|
|
|
break;
|
|
|
|
case 0x2c: /* frstor mem */
|
|
|
|
gen_helper_frstor(tcg_env, s->A0,
|
|
|
|
tcg_constant_i32(s->dflag - 1));
|
|
|
|
update_fip = update_fdp = false;
|
|
|
|
break;
|
|
|
|
case 0x2e: /* fnsave mem */
|
|
|
|
gen_helper_fsave(tcg_env, s->A0,
|
|
|
|
tcg_constant_i32(s->dflag - 1));
|
|
|
|
update_fip = update_fdp = false;
|
|
|
|
break;
|
|
|
|
case 0x2f: /* fnstsw mem */
|
|
|
|
gen_helper_fnstsw(s->tmp2_i32, tcg_env);
|
|
|
|
tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
|
|
|
|
s->mem_index, MO_LEUW);
|
|
|
|
update_fip = update_fdp = false;
|
|
|
|
break;
|
|
|
|
case 0x3c: /* fbld */
|
|
|
|
gen_helper_fbld_ST0(tcg_env, s->A0);
|
|
|
|
break;
|
|
|
|
case 0x3e: /* fbstp */
|
|
|
|
gen_helper_fbst_ST0(tcg_env, s->A0);
|
|
|
|
gen_helper_fpop(tcg_env);
|
|
|
|
break;
|
|
|
|
case 0x3d: /* fildll */
|
|
|
|
tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
|
|
|
|
s->mem_index, MO_LEUQ);
|
|
|
|
gen_helper_fildll_ST0(tcg_env, s->tmp1_i64);
|
|
|
|
break;
|
|
|
|
case 0x3f: /* fistpll */
|
|
|
|
gen_helper_fistll_ST0(s->tmp1_i64, tcg_env);
|
|
|
|
tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
|
|
|
|
s->mem_index, MO_LEUQ);
|
|
|
|
gen_helper_fpop(tcg_env);
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (update_fdp) {
|
|
|
|
int last_seg = s->override >= 0 ? s->override : a.def_seg;
|
|
|
|
|
|
|
|
tcg_gen_ld_i32(s->tmp2_i32, tcg_env,
|
|
|
|
offsetof(CPUX86State,
|
|
|
|
segs[last_seg].selector));
|
|
|
|
tcg_gen_st16_i32(s->tmp2_i32, tcg_env,
|
|
|
|
offsetof(CPUX86State, fpds));
|
|
|
|
tcg_gen_st_tl(last_addr, tcg_env,
|
|
|
|
offsetof(CPUX86State, fpdp));
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
/* register float ops */
|
|
|
|
int opreg = rm;
|
|
|
|
|
|
|
|
switch (op) {
|
|
|
|
case 0x08: /* fld sti */
|
|
|
|
gen_helper_fpush(tcg_env);
|
|
|
|
gen_helper_fmov_ST0_STN(tcg_env,
|
|
|
|
tcg_constant_i32((opreg + 1) & 7));
|
|
|
|
break;
|
|
|
|
case 0x09: /* fxchg sti */
|
|
|
|
case 0x29: /* fxchg4 sti, undocumented op */
|
|
|
|
case 0x39: /* fxchg7 sti, undocumented op */
|
|
|
|
gen_helper_fxchg_ST0_STN(tcg_env, tcg_constant_i32(opreg));
|
|
|
|
break;
|
|
|
|
case 0x0a: /* grp d9/2 */
|
|
|
|
switch (rm) {
|
|
|
|
case 0: /* fnop */
|
|
|
|
/*
|
|
|
|
* check exceptions (FreeBSD FPU probe)
|
|
|
|
* needs to be treated as I/O because of ferr_irq
|
|
|
|
*/
|
|
|
|
translator_io_start(&s->base);
|
|
|
|
gen_helper_fwait(tcg_env);
|
|
|
|
update_fip = false;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case 0x0c: /* grp d9/4 */
|
|
|
|
switch (rm) {
|
|
|
|
case 0: /* fchs */
|
|
|
|
gen_helper_fchs_ST0(tcg_env);
|
|
|
|
break;
|
|
|
|
case 1: /* fabs */
|
|
|
|
gen_helper_fabs_ST0(tcg_env);
|
|
|
|
break;
|
|
|
|
case 4: /* ftst */
|
|
|
|
gen_helper_fldz_FT0(tcg_env);
|
|
|
|
gen_helper_fcom_ST0_FT0(tcg_env);
|
|
|
|
break;
|
|
|
|
case 5: /* fxam */
|
|
|
|
gen_helper_fxam_ST0(tcg_env);
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case 0x0d: /* grp d9/5 */
|
|
|
|
{
|
|
|
|
switch (rm) {
|
|
|
|
case 0:
|
|
|
|
gen_helper_fpush(tcg_env);
|
|
|
|
gen_helper_fld1_ST0(tcg_env);
|
|
|
|
break;
|
|
|
|
case 1:
|
|
|
|
gen_helper_fpush(tcg_env);
|
|
|
|
gen_helper_fldl2t_ST0(tcg_env);
|
|
|
|
break;
|
|
|
|
case 2:
|
|
|
|
gen_helper_fpush(tcg_env);
|
|
|
|
gen_helper_fldl2e_ST0(tcg_env);
|
|
|
|
break;
|
|
|
|
case 3:
|
|
|
|
gen_helper_fpush(tcg_env);
|
|
|
|
gen_helper_fldpi_ST0(tcg_env);
|
|
|
|
break;
|
|
|
|
case 4:
|
|
|
|
gen_helper_fpush(tcg_env);
|
|
|
|
gen_helper_fldlg2_ST0(tcg_env);
|
|
|
|
break;
|
|
|
|
case 5:
|
|
|
|
gen_helper_fpush(tcg_env);
|
|
|
|
gen_helper_fldln2_ST0(tcg_env);
|
|
|
|
break;
|
|
|
|
case 6:
|
|
|
|
gen_helper_fpush(tcg_env);
|
|
|
|
gen_helper_fldz_ST0(tcg_env);
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case 0x0e: /* grp d9/6 */
|
|
|
|
switch (rm) {
|
|
|
|
case 0: /* f2xm1 */
|
|
|
|
gen_helper_f2xm1(tcg_env);
|
|
|
|
break;
|
|
|
|
case 1: /* fyl2x */
|
|
|
|
gen_helper_fyl2x(tcg_env);
|
|
|
|
break;
|
|
|
|
case 2: /* fptan */
|
|
|
|
gen_helper_fptan(tcg_env);
|
|
|
|
break;
|
|
|
|
case 3: /* fpatan */
|
|
|
|
gen_helper_fpatan(tcg_env);
|
|
|
|
break;
|
|
|
|
case 4: /* fxtract */
|
|
|
|
gen_helper_fxtract(tcg_env);
|
|
|
|
break;
|
|
|
|
case 5: /* fprem1 */
|
|
|
|
gen_helper_fprem1(tcg_env);
|
|
|
|
break;
|
|
|
|
case 6: /* fdecstp */
|
|
|
|
gen_helper_fdecstp(tcg_env);
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
case 7: /* fincstp */
|
|
|
|
gen_helper_fincstp(tcg_env);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case 0x0f: /* grp d9/7 */
|
|
|
|
switch (rm) {
|
|
|
|
case 0: /* fprem */
|
|
|
|
gen_helper_fprem(tcg_env);
|
|
|
|
break;
|
|
|
|
case 1: /* fyl2xp1 */
|
|
|
|
gen_helper_fyl2xp1(tcg_env);
|
|
|
|
break;
|
|
|
|
case 2: /* fsqrt */
|
|
|
|
gen_helper_fsqrt(tcg_env);
|
|
|
|
break;
|
|
|
|
case 3: /* fsincos */
|
|
|
|
gen_helper_fsincos(tcg_env);
|
|
|
|
break;
|
|
|
|
case 5: /* fscale */
|
|
|
|
gen_helper_fscale(tcg_env);
|
|
|
|
break;
|
|
|
|
case 4: /* frndint */
|
|
|
|
gen_helper_frndint(tcg_env);
|
|
|
|
break;
|
|
|
|
case 6: /* fsin */
|
|
|
|
gen_helper_fsin(tcg_env);
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
case 7: /* fcos */
|
|
|
|
gen_helper_fcos(tcg_env);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
|
|
|
|
case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
|
|
|
|
case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
|
|
|
|
{
|
|
|
|
int op1;
|
|
|
|
|
|
|
|
op1 = op & 7;
|
|
|
|
if (op >= 0x20) {
|
|
|
|
gen_helper_fp_arith_STN_ST0(op1, opreg);
|
|
|
|
if (op >= 0x30) {
|
|
|
|
gen_helper_fpop(tcg_env);
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
gen_helper_fmov_FT0_STN(tcg_env,
|
|
|
|
tcg_constant_i32(opreg));
|
|
|
|
gen_helper_fp_arith_ST0_FT0(op1);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case 0x02: /* fcom */
|
|
|
|
case 0x22: /* fcom2, undocumented op */
|
|
|
|
gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
|
|
|
|
gen_helper_fcom_ST0_FT0(tcg_env);
|
|
|
|
break;
|
|
|
|
case 0x03: /* fcomp */
|
|
|
|
case 0x23: /* fcomp3, undocumented op */
|
|
|
|
case 0x32: /* fcomp5, undocumented op */
|
|
|
|
gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
|
|
|
|
gen_helper_fcom_ST0_FT0(tcg_env);
|
|
|
|
gen_helper_fpop(tcg_env);
|
|
|
|
break;
|
|
|
|
case 0x15: /* da/5 */
|
|
|
|
switch (rm) {
|
|
|
|
case 1: /* fucompp */
|
|
|
|
gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(1));
|
|
|
|
gen_helper_fucom_ST0_FT0(tcg_env);
|
|
|
|
gen_helper_fpop(tcg_env);
|
|
|
|
gen_helper_fpop(tcg_env);
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case 0x1c:
|
|
|
|
switch (rm) {
|
|
|
|
case 0: /* feni (287 only, just do nop here) */
|
|
|
|
break;
|
|
|
|
case 1: /* fdisi (287 only, just do nop here) */
|
|
|
|
break;
|
|
|
|
case 2: /* fclex */
|
|
|
|
gen_helper_fclex(tcg_env);
|
|
|
|
update_fip = false;
|
|
|
|
break;
|
|
|
|
case 3: /* fninit */
|
|
|
|
gen_helper_fninit(tcg_env);
|
|
|
|
update_fip = false;
|
|
|
|
break;
|
|
|
|
case 4: /* fsetpm (287 only, just do nop here) */
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case 0x1d: /* fucomi */
|
|
|
|
if (!(s->cpuid_features & CPUID_CMOV)) {
|
|
|
|
goto illegal_op;
|
|
|
|
}
|
|
|
|
gen_update_cc_op(s);
|
|
|
|
gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
|
|
|
|
gen_helper_fucomi_ST0_FT0(tcg_env);
|
2024-05-17 00:04:28 +03:00
|
|
|
assume_cc_op(s, CC_OP_EFLAGS);
|
2023-10-13 09:42:58 +03:00
|
|
|
break;
|
|
|
|
case 0x1e: /* fcomi */
|
|
|
|
if (!(s->cpuid_features & CPUID_CMOV)) {
|
|
|
|
goto illegal_op;
|
|
|
|
}
|
|
|
|
gen_update_cc_op(s);
|
|
|
|
gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
|
|
|
|
gen_helper_fcomi_ST0_FT0(tcg_env);
|
2024-05-17 00:04:28 +03:00
|
|
|
assume_cc_op(s, CC_OP_EFLAGS);
|
2023-10-13 09:42:58 +03:00
|
|
|
break;
|
|
|
|
case 0x28: /* ffree sti */
|
|
|
|
gen_helper_ffree_STN(tcg_env, tcg_constant_i32(opreg));
|
|
|
|
break;
|
|
|
|
case 0x2a: /* fst sti */
|
|
|
|
gen_helper_fmov_STN_ST0(tcg_env, tcg_constant_i32(opreg));
|
|
|
|
break;
|
|
|
|
case 0x2b: /* fstp sti */
|
|
|
|
case 0x0b: /* fstp1 sti, undocumented op */
|
|
|
|
case 0x3a: /* fstp8 sti, undocumented op */
|
|
|
|
case 0x3b: /* fstp9 sti, undocumented op */
|
|
|
|
gen_helper_fmov_STN_ST0(tcg_env, tcg_constant_i32(opreg));
|
|
|
|
gen_helper_fpop(tcg_env);
|
|
|
|
break;
|
|
|
|
case 0x2c: /* fucom st(i) */
|
|
|
|
gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
|
|
|
|
gen_helper_fucom_ST0_FT0(tcg_env);
|
|
|
|
break;
|
|
|
|
case 0x2d: /* fucomp st(i) */
|
|
|
|
gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
|
|
|
|
gen_helper_fucom_ST0_FT0(tcg_env);
|
|
|
|
gen_helper_fpop(tcg_env);
|
|
|
|
break;
|
|
|
|
case 0x33: /* de/3 */
|
|
|
|
switch (rm) {
|
|
|
|
case 1: /* fcompp */
|
|
|
|
gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(1));
|
|
|
|
gen_helper_fcom_ST0_FT0(tcg_env);
|
|
|
|
gen_helper_fpop(tcg_env);
|
|
|
|
gen_helper_fpop(tcg_env);
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case 0x38: /* ffreep sti, undocumented op */
|
|
|
|
gen_helper_ffree_STN(tcg_env, tcg_constant_i32(opreg));
|
|
|
|
gen_helper_fpop(tcg_env);
|
|
|
|
break;
|
|
|
|
case 0x3c: /* df/4 */
|
|
|
|
switch (rm) {
|
|
|
|
case 0:
|
|
|
|
gen_helper_fnstsw(s->tmp2_i32, tcg_env);
|
|
|
|
tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32);
|
|
|
|
gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case 0x3d: /* fucomip */
|
|
|
|
if (!(s->cpuid_features & CPUID_CMOV)) {
|
|
|
|
goto illegal_op;
|
|
|
|
}
|
|
|
|
gen_update_cc_op(s);
|
|
|
|
gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
|
|
|
|
gen_helper_fucomi_ST0_FT0(tcg_env);
|
|
|
|
gen_helper_fpop(tcg_env);
|
2024-05-17 00:04:28 +03:00
|
|
|
assume_cc_op(s, CC_OP_EFLAGS);
|
2023-10-13 09:42:58 +03:00
|
|
|
break;
|
|
|
|
case 0x3e: /* fcomip */
|
|
|
|
if (!(s->cpuid_features & CPUID_CMOV)) {
|
|
|
|
goto illegal_op;
|
|
|
|
}
|
|
|
|
gen_update_cc_op(s);
|
|
|
|
gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
|
|
|
|
gen_helper_fcomi_ST0_FT0(tcg_env);
|
|
|
|
gen_helper_fpop(tcg_env);
|
2024-05-17 00:04:28 +03:00
|
|
|
assume_cc_op(s, CC_OP_EFLAGS);
|
2023-10-13 09:42:58 +03:00
|
|
|
break;
|
|
|
|
case 0x10 ... 0x13: /* fcmovxx */
|
|
|
|
case 0x18 ... 0x1b:
|
|
|
|
{
|
|
|
|
int op1;
|
|
|
|
TCGLabel *l1;
|
|
|
|
static const uint8_t fcmov_cc[8] = {
|
|
|
|
(JCC_B << 1),
|
|
|
|
(JCC_Z << 1),
|
|
|
|
(JCC_BE << 1),
|
|
|
|
(JCC_P << 1),
|
|
|
|
};
|
|
|
|
|
|
|
|
if (!(s->cpuid_features & CPUID_CMOV)) {
|
|
|
|
goto illegal_op;
|
|
|
|
}
|
|
|
|
op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1);
|
|
|
|
l1 = gen_new_label();
|
|
|
|
gen_jcc1_noeob(s, op1, l1);
|
|
|
|
gen_helper_fmov_ST0_STN(tcg_env,
|
|
|
|
tcg_constant_i32(opreg));
|
|
|
|
gen_set_label(l1);
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (update_fip) {
|
|
|
|
tcg_gen_ld_i32(s->tmp2_i32, tcg_env,
|
|
|
|
offsetof(CPUX86State, segs[R_CS].selector));
|
|
|
|
tcg_gen_st16_i32(s->tmp2_i32, tcg_env,
|
|
|
|
offsetof(CPUX86State, fpcs));
|
|
|
|
tcg_gen_st_tl(eip_cur_tl(s),
|
|
|
|
tcg_env, offsetof(CPUX86State, fpip));
|
|
|
|
}
|
|
|
|
return true;
|
|
|
|
|
|
|
|
illegal_op:
|
|
|
|
gen_illegal_opcode(s);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2023-12-22 19:30:06 +03:00
|
|
|
static void disas_insn_old(DisasContext *s, CPUState *cpu, int b)
|
|
|
|
{
|
|
|
|
CPUX86State *env = cpu_env(cpu);
|
|
|
|
int prefixes = s->prefix;
|
|
|
|
MemOp dflag = s->dflag;
|
|
|
|
int shift;
|
|
|
|
MemOp ot;
|
|
|
|
int modrm, reg, rm, mod, op, opreg, val;
|
|
|
|
|
2003-10-01 00:34:21 +04:00
|
|
|
/* now check op code */
|
target/i386: add core of new i386 decoder
The new decoder is based on three principles:
- use mostly table-driven decoding, using tables derived as much as possible
from the Intel manual. Centralizing the decoding of the operands makes it
more homogeneous, for example all immediates are signed. All modrm
handling is in one function, and can be shared between SSE and ALU
instructions (including XMM<->GPR instructions). The SSE/AVX decoder
will also not have duplicated code between the 0F, 0F38 and 0F3A tables.
- keep the code as "non-branchy" as possible. Generally, the code for
the new decoder is more verbose, but the control flow is simpler.
Conditionals are not nested and have small bodies. All instruction
groups are resolved even before operands are decoded, and code
generation is separated as much as possible within small functions
that only handle one instruction each.
- keep address generation and (for ALU operands) memory loads and writeback
as much in common code as possible. All ALU operations for example
are implemented as T0=f(T0,T1). For non-ALU instructions,
read-modify-write memory operations are rare, but registers do not
have TCGv equivalents: therefore, the common logic sets up pointer
temporaries with the operands, while load and writeback are handled
by gvec or by helpers.
These principles make future code review and extensibility simpler, at
the cost of having a relatively large amount of code in the form of this
patch. Even EVEX should not be _too_ hard to implement (it's just a crazy
large amount of possibilities).
This patch introduces the main decoder flow, and integrates the old
decoder with the new one. The old decoder takes care of parsing
prefixes and then optionally drops to the new one. The changes to the
old decoder are minimal and allow it to be replaced incrementally with
the new one.
There is a debugging mechanism through a "LIMIT" environment variable.
In user-mode emulation, the variable is the number of instructions
decoded by the new decoder before permanently switching to the old one.
In system emulation, the variable is the highest opcode that is decoded
by the new decoder (this is less friendly, but it's the best that can
be done without requiring deterministic execution).
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
2022-08-23 12:20:55 +03:00
|
|
|
switch (b) {
|
2003-10-01 00:34:21 +04:00
|
|
|
/**************************/
|
|
|
|
/* arith & logic */
|
|
|
|
case 0x1c0:
|
|
|
|
case 0x1c1: /* xadd Ev, Gv */
|
2013-11-06 03:37:57 +04:00
|
|
|
ot = mo_b_d(b, dflag);
|
2017-04-26 14:59:34 +03:00
|
|
|
modrm = x86_ldub_code(env, s);
|
2021-05-14 18:13:09 +03:00
|
|
|
reg = ((modrm >> 3) & 7) | REX_R(s);
|
2003-10-01 00:34:21 +04:00
|
|
|
mod = (modrm >> 6) & 3;
|
2018-09-11 23:07:54 +03:00
|
|
|
gen_op_mov_v_reg(s, ot, s->T0, reg);
|
2003-10-01 00:34:21 +04:00
|
|
|
if (mod == 3) {
|
2005-01-04 02:50:08 +03:00
|
|
|
rm = (modrm & 7) | REX_B(s);
|
2018-09-11 23:07:54 +03:00
|
|
|
gen_op_mov_v_reg(s, ot, s->T1, rm);
|
2018-09-11 21:50:46 +03:00
|
|
|
tcg_gen_add_tl(s->T0, s->T0, s->T1);
|
2018-09-11 23:07:54 +03:00
|
|
|
gen_op_mov_reg_v(s, ot, reg, s->T1);
|
|
|
|
gen_op_mov_reg_v(s, ot, rm, s->T0);
|
2003-10-01 00:34:21 +04:00
|
|
|
} else {
|
2013-11-02 22:55:59 +04:00
|
|
|
gen_lea_modrm(env, s, modrm);
|
2016-06-27 22:02:02 +03:00
|
|
|
if (s->prefix & PREFIX_LOCK) {
|
2018-09-11 21:50:46 +03:00
|
|
|
tcg_gen_atomic_fetch_add_tl(s->T1, s->A0, s->T0,
|
2016-06-27 22:02:02 +03:00
|
|
|
s->mem_index, ot | MO_LE);
|
2018-09-11 21:50:46 +03:00
|
|
|
tcg_gen_add_tl(s->T0, s->T0, s->T1);
|
2016-06-27 22:02:02 +03:00
|
|
|
} else {
|
2018-09-11 21:50:46 +03:00
|
|
|
gen_op_ld_v(s, ot, s->T1, s->A0);
|
|
|
|
tcg_gen_add_tl(s->T0, s->T0, s->T1);
|
2018-09-11 21:48:41 +03:00
|
|
|
gen_op_st_v(s, ot, s->T0, s->A0);
|
2016-06-27 22:02:02 +03:00
|
|
|
}
|
2018-09-11 23:07:54 +03:00
|
|
|
gen_op_mov_reg_v(s, ot, reg, s->T1);
|
2003-10-01 00:34:21 +04:00
|
|
|
}
|
2018-09-11 21:48:41 +03:00
|
|
|
gen_op_update2_cc(s);
|
2013-01-24 00:30:52 +04:00
|
|
|
set_cc_op(s, CC_OP_ADDB + ot);
|
2003-10-01 00:34:21 +04:00
|
|
|
break;
|
|
|
|
case 0x1b0:
|
|
|
|
case 0x1b1: /* cmpxchg Ev, Gv */
|
2008-05-17 17:50:02 +04:00
|
|
|
{
|
2022-09-11 15:04:36 +03:00
|
|
|
TCGv oldv, newv, cmpv, dest;
|
2008-05-17 17:50:02 +04:00
|
|
|
|
2013-11-06 03:37:57 +04:00
|
|
|
ot = mo_b_d(b, dflag);
|
2017-04-26 14:59:34 +03:00
|
|
|
modrm = x86_ldub_code(env, s);
|
2021-05-14 18:13:09 +03:00
|
|
|
reg = ((modrm >> 3) & 7) | REX_R(s);
|
2008-05-17 17:50:02 +04:00
|
|
|
mod = (modrm >> 6) & 3;
|
2016-06-27 22:01:51 +03:00
|
|
|
oldv = tcg_temp_new();
|
|
|
|
newv = tcg_temp_new();
|
|
|
|
cmpv = tcg_temp_new();
|
2018-09-11 23:07:54 +03:00
|
|
|
gen_op_mov_v_reg(s, ot, newv, reg);
|
2016-06-27 22:01:51 +03:00
|
|
|
tcg_gen_mov_tl(cmpv, cpu_regs[R_EAX]);
|
2022-09-11 15:04:36 +03:00
|
|
|
gen_extu(ot, cmpv);
|
2016-06-27 22:01:51 +03:00
|
|
|
if (s->prefix & PREFIX_LOCK) {
|
|
|
|
if (mod == 3) {
|
|
|
|
goto illegal_op;
|
|
|
|
}
|
2013-11-02 22:55:59 +04:00
|
|
|
gen_lea_modrm(env, s, modrm);
|
2018-09-11 21:41:57 +03:00
|
|
|
tcg_gen_atomic_cmpxchg_tl(oldv, s->A0, cmpv, newv,
|
2016-06-27 22:01:51 +03:00
|
|
|
s->mem_index, ot | MO_LE);
|
2008-05-17 17:50:02 +04:00
|
|
|
} else {
|
2016-06-27 22:01:51 +03:00
|
|
|
if (mod == 3) {
|
|
|
|
rm = (modrm & 7) | REX_B(s);
|
2018-09-11 23:07:54 +03:00
|
|
|
gen_op_mov_v_reg(s, ot, oldv, rm);
|
2022-09-11 15:04:36 +03:00
|
|
|
gen_extu(ot, oldv);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Unlike the memory case, where "the destination operand receives
|
|
|
|
* a write cycle without regard to the result of the comparison",
|
|
|
|
* rm must not be touched altogether if the write fails, including
|
|
|
|
* not zero-extending it on 64-bit processors. So, precompute
|
|
|
|
* the result of a successful writeback and perform the movcond
|
|
|
|
* directly on cpu_regs. Also need to write accumulator first, in
|
|
|
|
* case rm is part of RAX too.
|
|
|
|
*/
|
|
|
|
dest = gen_op_deposit_reg_v(s, ot, rm, newv, newv);
|
|
|
|
tcg_gen_movcond_tl(TCG_COND_EQ, dest, oldv, cmpv, newv, dest);
|
2016-06-27 22:01:51 +03:00
|
|
|
} else {
|
|
|
|
gen_lea_modrm(env, s, modrm);
|
2018-09-11 21:41:57 +03:00
|
|
|
gen_op_ld_v(s, ot, oldv, s->A0);
|
2022-09-11 15:04:36 +03:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Perform an unconditional store cycle like physical cpu;
|
|
|
|
* must be before changing accumulator to ensure
|
|
|
|
* idempotency if the store faults and the instruction
|
|
|
|
* is restarted
|
|
|
|
*/
|
|
|
|
tcg_gen_movcond_tl(TCG_COND_EQ, newv, oldv, cmpv, newv, oldv);
|
2018-09-11 21:41:57 +03:00
|
|
|
gen_op_st_v(s, ot, newv, s->A0);
|
2016-06-27 22:01:51 +03:00
|
|
|
}
|
2008-05-17 17:50:02 +04:00
|
|
|
}
|
2022-09-11 15:04:36 +03:00
|
|
|
/*
|
|
|
|
* Write EAX only if the cmpxchg fails; reuse newv as the destination,
|
|
|
|
* since it's dead here.
|
|
|
|
*/
|
|
|
|
dest = gen_op_deposit_reg_v(s, ot, R_EAX, newv, oldv);
|
|
|
|
tcg_gen_movcond_tl(TCG_COND_EQ, dest, oldv, cmpv, dest, newv);
|
2016-06-27 22:01:51 +03:00
|
|
|
tcg_gen_mov_tl(cpu_cc_src, oldv);
|
2018-09-11 21:38:47 +03:00
|
|
|
tcg_gen_mov_tl(s->cc_srcT, cmpv);
|
2016-06-27 22:01:51 +03:00
|
|
|
tcg_gen_sub_tl(cpu_cc_dst, cmpv, oldv);
|
2013-01-24 00:30:52 +04:00
|
|
|
set_cc_op(s, CC_OP_SUBB + ot);
|
2003-10-01 00:34:21 +04:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
case 0x1c7: /* cmpxchg8b */
|
2017-04-26 14:59:34 +03:00
|
|
|
modrm = x86_ldub_code(env, s);
|
2003-10-01 00:34:21 +04:00
|
|
|
mod = (modrm >> 6) & 3;
|
2019-03-15 06:01:42 +03:00
|
|
|
switch ((modrm >> 3) & 7) {
|
|
|
|
case 1: /* CMPXCHG8, CMPXCHG16 */
|
|
|
|
if (mod == 3) {
|
2008-05-22 13:52:38 +04:00
|
|
|
goto illegal_op;
|
2016-06-27 22:01:51 +03:00
|
|
|
}
|
2019-03-15 06:01:42 +03:00
|
|
|
#ifdef TARGET_X86_64
|
|
|
|
if (dflag == MO_64) {
|
|
|
|
if (!(s->cpuid_ext_features & CPUID_EXT_CX16)) {
|
|
|
|
goto illegal_op;
|
|
|
|
}
|
2022-11-09 07:22:15 +03:00
|
|
|
gen_cmpxchg16b(s, env, modrm);
|
2019-03-15 06:01:42 +03:00
|
|
|
break;
|
|
|
|
}
|
2022-11-09 07:22:15 +03:00
|
|
|
#endif
|
2019-03-15 06:01:42 +03:00
|
|
|
if (!(s->cpuid_features & CPUID_CX8)) {
|
2008-05-22 13:52:38 +04:00
|
|
|
goto illegal_op;
|
2019-03-15 06:01:42 +03:00
|
|
|
}
|
2022-11-09 07:22:15 +03:00
|
|
|
gen_cmpxchg8b(s, env, modrm);
|
2019-03-15 06:01:42 +03:00
|
|
|
break;
|
|
|
|
|
2023-06-21 01:47:31 +03:00
|
|
|
case 7: /* RDSEED, RDPID with f3 prefix */
|
2023-06-21 01:43:22 +03:00
|
|
|
if (mod != 3 ||
|
2023-06-21 01:47:31 +03:00
|
|
|
(s->prefix & (PREFIX_LOCK | PREFIX_REPNZ))) {
|
2023-06-21 01:43:22 +03:00
|
|
|
goto illegal_op;
|
|
|
|
}
|
2023-06-21 01:47:31 +03:00
|
|
|
if (s->prefix & PREFIX_REPZ) {
|
|
|
|
if (!(s->cpuid_ext_features & CPUID_7_0_ECX_RDPID)) {
|
|
|
|
goto illegal_op;
|
|
|
|
}
|
2023-09-14 02:37:36 +03:00
|
|
|
gen_helper_rdpid(s->T0, tcg_env);
|
2023-06-21 01:47:31 +03:00
|
|
|
rm = (modrm & 7) | REX_B(s);
|
|
|
|
gen_op_mov_reg_v(s, dflag, rm, s->T0);
|
|
|
|
break;
|
|
|
|
} else {
|
|
|
|
if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_RDSEED)) {
|
|
|
|
goto illegal_op;
|
|
|
|
}
|
|
|
|
goto do_rdrand;
|
|
|
|
}
|
2023-06-21 01:43:22 +03:00
|
|
|
|
2019-03-15 06:01:42 +03:00
|
|
|
case 6: /* RDRAND */
|
|
|
|
if (mod != 3 ||
|
|
|
|
(s->prefix & (PREFIX_LOCK | PREFIX_REPZ | PREFIX_REPNZ)) ||
|
|
|
|
!(s->cpuid_ext_features & CPUID_EXT_RDRAND)) {
|
|
|
|
goto illegal_op;
|
|
|
|
}
|
2023-06-21 01:43:22 +03:00
|
|
|
do_rdrand:
|
2023-05-23 09:08:01 +03:00
|
|
|
translator_io_start(&s->base);
|
2023-09-14 02:37:36 +03:00
|
|
|
gen_helper_rdrand(s->T0, tcg_env);
|
2019-03-15 06:01:42 +03:00
|
|
|
rm = (modrm & 7) | REX_B(s);
|
|
|
|
gen_op_mov_reg_v(s, dflag, rm, s->T0);
|
2024-05-17 00:04:28 +03:00
|
|
|
assume_cc_op(s, CC_OP_EFLAGS);
|
2019-03-15 06:01:42 +03:00
|
|
|
break;
|
|
|
|
|
|
|
|
default:
|
|
|
|
goto illegal_op;
|
2008-05-22 13:52:38 +04:00
|
|
|
}
|
2003-10-01 00:34:21 +04:00
|
|
|
break;
|
2007-09-17 12:09:54 +04:00
|
|
|
|
2003-10-01 00:34:21 +04:00
|
|
|
/**************************/
|
|
|
|
/* shifts */
|
|
|
|
case 0x1a4: /* shld imm */
|
|
|
|
op = 0;
|
|
|
|
shift = 1;
|
|
|
|
goto do_shiftd;
|
|
|
|
case 0x1a5: /* shld cl */
|
|
|
|
op = 0;
|
|
|
|
shift = 0;
|
|
|
|
goto do_shiftd;
|
|
|
|
case 0x1ac: /* shrd imm */
|
|
|
|
op = 1;
|
|
|
|
shift = 1;
|
|
|
|
goto do_shiftd;
|
|
|
|
case 0x1ad: /* shrd cl */
|
|
|
|
op = 1;
|
|
|
|
shift = 0;
|
|
|
|
do_shiftd:
|
2013-11-06 03:37:57 +04:00
|
|
|
ot = dflag;
|
2017-04-26 14:59:34 +03:00
|
|
|
modrm = x86_ldub_code(env, s);
|
2003-10-01 00:34:21 +04:00
|
|
|
mod = (modrm >> 6) & 3;
|
2005-01-04 02:50:08 +03:00
|
|
|
rm = (modrm & 7) | REX_B(s);
|
2021-05-14 18:13:09 +03:00
|
|
|
reg = ((modrm >> 3) & 7) | REX_R(s);
|
2003-10-01 00:34:21 +04:00
|
|
|
if (mod != 3) {
|
2013-11-02 22:55:59 +04:00
|
|
|
gen_lea_modrm(env, s, modrm);
|
2008-05-17 16:44:31 +04:00
|
|
|
opreg = OR_TMP0;
|
2003-10-01 00:34:21 +04:00
|
|
|
} else {
|
2008-05-17 16:44:31 +04:00
|
|
|
opreg = rm;
|
2003-10-01 00:34:21 +04:00
|
|
|
}
|
2018-09-11 23:07:54 +03:00
|
|
|
gen_op_mov_v_reg(s, ot, s->T1, reg);
|
2007-09-17 12:09:54 +04:00
|
|
|
|
2003-10-01 00:34:21 +04:00
|
|
|
if (shift) {
|
2023-02-26 02:26:02 +03:00
|
|
|
TCGv imm = tcg_constant_tl(x86_ldub_code(env, s));
|
2012-10-12 17:04:10 +04:00
|
|
|
gen_shiftd_rm_T1(s, ot, opreg, op, imm);
|
2003-10-01 00:34:21 +04:00
|
|
|
} else {
|
2012-10-12 17:04:10 +04:00
|
|
|
gen_shiftd_rm_T1(s, ot, opreg, op, cpu_regs[R_ECX]);
|
2003-10-01 00:34:21 +04:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
|
|
|
|
/************************/
|
|
|
|
/* bit operations */
|
|
|
|
case 0x1ba: /* bt/bts/btr/btc Gv, im */
|
2013-11-06 03:37:57 +04:00
|
|
|
ot = dflag;
|
2017-04-26 14:59:34 +03:00
|
|
|
modrm = x86_ldub_code(env, s);
|
2006-04-02 23:13:41 +04:00
|
|
|
op = (modrm >> 3) & 7;
|
2003-10-01 00:34:21 +04:00
|
|
|
mod = (modrm >> 6) & 3;
|
2005-01-04 02:50:08 +03:00
|
|
|
rm = (modrm & 7) | REX_B(s);
|
2003-10-01 00:34:21 +04:00
|
|
|
if (mod != 3) {
|
2005-01-04 02:50:08 +03:00
|
|
|
s->rip_offset = 1;
|
2013-11-02 22:55:59 +04:00
|
|
|
gen_lea_modrm(env, s, modrm);
|
2016-06-27 22:02:03 +03:00
|
|
|
if (!(s->prefix & PREFIX_LOCK)) {
|
2018-09-11 21:48:41 +03:00
|
|
|
gen_op_ld_v(s, ot, s->T0, s->A0);
|
2016-06-27 22:02:03 +03:00
|
|
|
}
|
2003-10-01 00:34:21 +04:00
|
|
|
} else {
|
2018-09-11 23:07:54 +03:00
|
|
|
gen_op_mov_v_reg(s, ot, s->T0, rm);
|
2003-10-01 00:34:21 +04:00
|
|
|
}
|
|
|
|
/* load shift */
|
2017-04-26 14:59:34 +03:00
|
|
|
val = x86_ldub_code(env, s);
|
2018-09-11 21:50:46 +03:00
|
|
|
tcg_gen_movi_tl(s->T1, val);
|
2003-10-01 00:34:21 +04:00
|
|
|
if (op < 4)
|
2016-03-02 03:53:18 +03:00
|
|
|
goto unknown_op;
|
2003-10-01 00:34:21 +04:00
|
|
|
op -= 4;
|
2008-05-17 20:10:38 +04:00
|
|
|
goto bt_op;
|
2003-10-01 00:34:21 +04:00
|
|
|
case 0x1a3: /* bt Gv, Ev */
|
|
|
|
op = 0;
|
|
|
|
goto do_btx;
|
|
|
|
case 0x1ab: /* bts */
|
|
|
|
op = 1;
|
|
|
|
goto do_btx;
|
|
|
|
case 0x1b3: /* btr */
|
|
|
|
op = 2;
|
|
|
|
goto do_btx;
|
|
|
|
case 0x1bb: /* btc */
|
|
|
|
op = 3;
|
|
|
|
do_btx:
|
2013-11-06 03:37:57 +04:00
|
|
|
ot = dflag;
|
2017-04-26 14:59:34 +03:00
|
|
|
modrm = x86_ldub_code(env, s);
|
2021-05-14 18:13:09 +03:00
|
|
|
reg = ((modrm >> 3) & 7) | REX_R(s);
|
2003-10-01 00:34:21 +04:00
|
|
|
mod = (modrm >> 6) & 3;
|
2005-01-04 02:50:08 +03:00
|
|
|
rm = (modrm & 7) | REX_B(s);
|
2018-09-11 23:07:54 +03:00
|
|
|
gen_op_mov_v_reg(s, MO_32, s->T1, reg);
|
2003-10-01 00:34:21 +04:00
|
|
|
if (mod != 3) {
|
2016-06-27 22:02:03 +03:00
|
|
|
AddressParts a = gen_lea_modrm_0(env, s, modrm);
|
2003-10-01 00:34:21 +04:00
|
|
|
/* specific case: we need to add a displacement */
|
2018-09-11 21:50:46 +03:00
|
|
|
gen_exts(ot, s->T1);
|
2018-09-11 21:07:57 +03:00
|
|
|
tcg_gen_sari_tl(s->tmp0, s->T1, 3 + ot);
|
|
|
|
tcg_gen_shli_tl(s->tmp0, s->tmp0, ot);
|
2022-09-18 01:43:52 +03:00
|
|
|
tcg_gen_add_tl(s->A0, gen_lea_modrm_1(s, a, false), s->tmp0);
|
2018-09-11 21:41:57 +03:00
|
|
|
gen_lea_v_seg(s, s->aflag, s->A0, a.def_seg, s->override);
|
2016-06-27 22:02:03 +03:00
|
|
|
if (!(s->prefix & PREFIX_LOCK)) {
|
2018-09-11 21:48:41 +03:00
|
|
|
gen_op_ld_v(s, ot, s->T0, s->A0);
|
2016-06-27 22:02:03 +03:00
|
|
|
}
|
2003-10-01 00:34:21 +04:00
|
|
|
} else {
|
2018-09-11 23:07:54 +03:00
|
|
|
gen_op_mov_v_reg(s, ot, s->T0, rm);
|
2003-10-01 00:34:21 +04:00
|
|
|
}
|
2008-05-17 20:10:38 +04:00
|
|
|
bt_op:
|
2018-09-11 21:50:46 +03:00
|
|
|
tcg_gen_andi_tl(s->T1, s->T1, (1 << (3 + ot)) - 1);
|
2018-09-11 21:07:57 +03:00
|
|
|
tcg_gen_movi_tl(s->tmp0, 1);
|
|
|
|
tcg_gen_shl_tl(s->tmp0, s->tmp0, s->T1);
|
2016-06-27 22:02:03 +03:00
|
|
|
if (s->prefix & PREFIX_LOCK) {
|
|
|
|
switch (op) {
|
|
|
|
case 0: /* bt */
|
2023-07-14 14:16:12 +03:00
|
|
|
/* Needs no atomic ops; we suppressed the normal
|
2016-06-27 22:02:03 +03:00
|
|
|
memory load for LOCK above so do it now. */
|
2018-09-11 21:48:41 +03:00
|
|
|
gen_op_ld_v(s, ot, s->T0, s->A0);
|
2016-06-27 22:02:03 +03:00
|
|
|
break;
|
|
|
|
case 1: /* bts */
|
2018-09-11 21:07:57 +03:00
|
|
|
tcg_gen_atomic_fetch_or_tl(s->T0, s->A0, s->tmp0,
|
2016-06-27 22:02:03 +03:00
|
|
|
s->mem_index, ot | MO_LE);
|
|
|
|
break;
|
|
|
|
case 2: /* btr */
|
2018-09-11 21:07:57 +03:00
|
|
|
tcg_gen_not_tl(s->tmp0, s->tmp0);
|
|
|
|
tcg_gen_atomic_fetch_and_tl(s->T0, s->A0, s->tmp0,
|
2016-06-27 22:02:03 +03:00
|
|
|
s->mem_index, ot | MO_LE);
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
case 3: /* btc */
|
2018-09-11 21:07:57 +03:00
|
|
|
tcg_gen_atomic_fetch_xor_tl(s->T0, s->A0, s->tmp0,
|
2016-06-27 22:02:03 +03:00
|
|
|
s->mem_index, ot | MO_LE);
|
|
|
|
break;
|
|
|
|
}
|
2018-09-11 21:10:21 +03:00
|
|
|
tcg_gen_shr_tl(s->tmp4, s->T0, s->T1);
|
2016-06-27 22:02:03 +03:00
|
|
|
} else {
|
2018-09-11 21:10:21 +03:00
|
|
|
tcg_gen_shr_tl(s->tmp4, s->T0, s->T1);
|
2016-06-27 22:02:03 +03:00
|
|
|
switch (op) {
|
|
|
|
case 0: /* bt */
|
|
|
|
/* Data already loaded; nothing to do. */
|
|
|
|
break;
|
|
|
|
case 1: /* bts */
|
2018-09-11 21:07:57 +03:00
|
|
|
tcg_gen_or_tl(s->T0, s->T0, s->tmp0);
|
2016-06-27 22:02:03 +03:00
|
|
|
break;
|
|
|
|
case 2: /* btr */
|
2018-09-11 21:07:57 +03:00
|
|
|
tcg_gen_andc_tl(s->T0, s->T0, s->tmp0);
|
2016-06-27 22:02:03 +03:00
|
|
|
break;
|
|
|
|
default:
|
|
|
|
case 3: /* btc */
|
2018-09-11 21:07:57 +03:00
|
|
|
tcg_gen_xor_tl(s->T0, s->T0, s->tmp0);
|
2016-06-27 22:02:03 +03:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
if (op != 0) {
|
|
|
|
if (mod != 3) {
|
2018-09-11 21:48:41 +03:00
|
|
|
gen_op_st_v(s, ot, s->T0, s->A0);
|
2016-06-27 22:02:03 +03:00
|
|
|
} else {
|
2018-09-11 23:07:54 +03:00
|
|
|
gen_op_mov_reg_v(s, ot, rm, s->T0);
|
2016-06-27 22:02:03 +03:00
|
|
|
}
|
2013-11-02 22:12:01 +04:00
|
|
|
}
|
2014-04-10 00:51:41 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Delay all CC updates until after the store above. Note that
|
|
|
|
C is the result of the test, Z is unchanged, and the others
|
|
|
|
are all undefined. */
|
|
|
|
switch (s->cc_op) {
|
|
|
|
case CC_OP_MULB ... CC_OP_MULQ:
|
|
|
|
case CC_OP_ADDB ... CC_OP_ADDQ:
|
|
|
|
case CC_OP_ADCB ... CC_OP_ADCQ:
|
|
|
|
case CC_OP_SUBB ... CC_OP_SUBQ:
|
|
|
|
case CC_OP_SBBB ... CC_OP_SBBQ:
|
|
|
|
case CC_OP_LOGICB ... CC_OP_LOGICQ:
|
|
|
|
case CC_OP_INCB ... CC_OP_INCQ:
|
|
|
|
case CC_OP_DECB ... CC_OP_DECQ:
|
|
|
|
case CC_OP_SHLB ... CC_OP_SHLQ:
|
|
|
|
case CC_OP_SARB ... CC_OP_SARQ:
|
|
|
|
case CC_OP_BMILGB ... CC_OP_BMILGQ:
|
|
|
|
/* Z was going to be computed from the non-zero status of CC_DST.
|
|
|
|
We can get that same Z value (and the new C value) by leaving
|
|
|
|
CC_DST alone, setting CC_SRC, and using a CC_OP_SAR of the
|
|
|
|
same width. */
|
2018-09-11 21:10:21 +03:00
|
|
|
tcg_gen_mov_tl(cpu_cc_src, s->tmp4);
|
2014-04-10 00:51:41 +04:00
|
|
|
set_cc_op(s, ((s->cc_op - CC_OP_MULB) & 3) + CC_OP_SARB);
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
/* Otherwise, generate EFLAGS and replace the C bit. */
|
|
|
|
gen_compute_eflags(s);
|
2018-09-11 21:10:21 +03:00
|
|
|
tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, s->tmp4,
|
2014-04-10 00:51:41 +04:00
|
|
|
ctz32(CC_C), 1);
|
|
|
|
break;
|
2003-10-01 00:34:21 +04:00
|
|
|
}
|
|
|
|
break;
|
2013-01-22 01:32:02 +04:00
|
|
|
case 0x1bc: /* bsf / tzcnt */
|
|
|
|
case 0x1bd: /* bsr / lzcnt */
|
2013-11-06 03:37:57 +04:00
|
|
|
ot = dflag;
|
2017-04-26 14:59:34 +03:00
|
|
|
modrm = x86_ldub_code(env, s);
|
2021-05-14 18:13:09 +03:00
|
|
|
reg = ((modrm >> 3) & 7) | REX_R(s);
|
2013-01-22 01:32:02 +04:00
|
|
|
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
|
2018-09-11 21:48:41 +03:00
|
|
|
gen_extu(ot, s->T0);
|
2013-01-22 01:32:02 +04:00
|
|
|
|
|
|
|
/* Note that lzcnt and tzcnt are in different extensions. */
|
|
|
|
if ((prefixes & PREFIX_REPZ)
|
|
|
|
&& (b & 1
|
|
|
|
? s->cpuid_ext3_features & CPUID_EXT3_ABM
|
|
|
|
: s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)) {
|
|
|
|
int size = 8 << ot;
|
2016-11-16 14:21:13 +03:00
|
|
|
/* For lzcnt/tzcnt, C bit is defined related to the input. */
|
2018-09-11 21:48:41 +03:00
|
|
|
tcg_gen_mov_tl(cpu_cc_src, s->T0);
|
2013-01-22 01:32:02 +04:00
|
|
|
if (b & 1) {
|
|
|
|
/* For lzcnt, reduce the target_ulong result by the
|
|
|
|
number of zeros that we expect to find at the top. */
|
2018-09-11 21:48:41 +03:00
|
|
|
tcg_gen_clzi_tl(s->T0, s->T0, TARGET_LONG_BITS);
|
|
|
|
tcg_gen_subi_tl(s->T0, s->T0, TARGET_LONG_BITS - size);
|
2008-05-17 22:44:58 +04:00
|
|
|
} else {
|
2016-11-16 14:21:13 +03:00
|
|
|
/* For tzcnt, a zero input must return the operand size. */
|
2018-09-11 21:48:41 +03:00
|
|
|
tcg_gen_ctzi_tl(s->T0, s->T0, size);
|
2008-05-17 22:44:58 +04:00
|
|
|
}
|
2016-11-16 14:21:13 +03:00
|
|
|
/* For lzcnt/tzcnt, Z bit is defined related to the result. */
|
2018-09-11 21:48:41 +03:00
|
|
|
gen_op_update1_cc(s);
|
2013-01-22 01:32:02 +04:00
|
|
|
set_cc_op(s, CC_OP_BMILGB + ot);
|
|
|
|
} else {
|
|
|
|
/* For bsr/bsf, only the Z bit is defined and it is related
|
|
|
|
to the input and not the result. */
|
2018-09-11 21:48:41 +03:00
|
|
|
tcg_gen_mov_tl(cpu_cc_dst, s->T0);
|
2013-01-22 01:32:02 +04:00
|
|
|
set_cc_op(s, CC_OP_LOGICB + ot);
|
2016-11-16 14:21:13 +03:00
|
|
|
|
|
|
|
/* ??? The manual says that the output is undefined when the
|
|
|
|
input is zero, but real hardware leaves it unchanged, and
|
|
|
|
real programs appear to depend on that. Accomplish this
|
|
|
|
by passing the output as the value to return upon zero. */
|
2013-01-22 01:32:02 +04:00
|
|
|
if (b & 1) {
|
|
|
|
/* For bsr, return the bit index of the first 1 bit,
|
|
|
|
not the count of leading zeros. */
|
2018-09-11 21:50:46 +03:00
|
|
|
tcg_gen_xori_tl(s->T1, cpu_regs[reg], TARGET_LONG_BITS - 1);
|
|
|
|
tcg_gen_clz_tl(s->T0, s->T0, s->T1);
|
2018-09-11 21:48:41 +03:00
|
|
|
tcg_gen_xori_tl(s->T0, s->T0, TARGET_LONG_BITS - 1);
|
2013-01-22 01:32:02 +04:00
|
|
|
} else {
|
2018-09-11 21:48:41 +03:00
|
|
|
tcg_gen_ctz_tl(s->T0, s->T0, cpu_regs[reg]);
|
2013-01-22 01:32:02 +04:00
|
|
|
}
|
2008-05-17 22:44:58 +04:00
|
|
|
}
|
2018-09-11 23:07:54 +03:00
|
|
|
gen_op_mov_reg_v(s, ot, reg, s->T0);
|
2003-10-01 00:34:21 +04:00
|
|
|
break;
|
|
|
|
case 0x130: /* wrmsr */
|
|
|
|
case 0x132: /* rdmsr */
|
2021-05-14 18:12:54 +03:00
|
|
|
if (check_cpl0(s)) {
|
2013-01-24 00:43:12 +04:00
|
|
|
gen_update_cc_op(s);
|
2022-10-01 17:09:14 +03:00
|
|
|
gen_update_eip_cur(s);
|
2007-09-23 19:28:04 +04:00
|
|
|
if (b & 2) {
|
2023-09-14 02:37:36 +03:00
|
|
|
gen_helper_rdmsr(tcg_env);
|
2007-09-23 19:28:04 +04:00
|
|
|
} else {
|
2023-09-14 02:37:36 +03:00
|
|
|
gen_helper_wrmsr(tcg_env);
|
2022-10-01 17:09:18 +03:00
|
|
|
s->base.is_jmp = DISAS_EOB_NEXT;
|
2007-09-23 19:28:04 +04:00
|
|
|
}
|
2003-10-01 00:34:21 +04:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
case 0x131: /* rdtsc */
|
2013-01-24 00:43:12 +04:00
|
|
|
gen_update_cc_op(s);
|
2022-10-01 17:09:14 +03:00
|
|
|
gen_update_eip_cur(s);
|
2023-05-23 09:08:01 +03:00
|
|
|
translator_io_start(&s->base);
|
2023-09-14 02:37:36 +03:00
|
|
|
gen_helper_rdtsc(tcg_env);
|
2003-10-01 00:34:21 +04:00
|
|
|
break;
|
2007-12-10 02:35:27 +03:00
|
|
|
case 0x133: /* rdpmc */
|
2013-01-24 00:43:12 +04:00
|
|
|
gen_update_cc_op(s);
|
2022-10-01 17:09:14 +03:00
|
|
|
gen_update_eip_cur(s);
|
2023-09-14 02:37:36 +03:00
|
|
|
gen_helper_rdpmc(tcg_env);
|
2021-05-14 18:13:26 +03:00
|
|
|
s->base.is_jmp = DISAS_NORETURN;
|
2007-12-10 02:35:27 +03:00
|
|
|
break;
|
2004-05-29 15:08:52 +04:00
|
|
|
case 0x134: /* sysenter */
|
2023-06-19 16:41:42 +03:00
|
|
|
/* For AMD SYSENTER is not valid in long mode */
|
|
|
|
if (LMA(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1) {
|
2005-01-04 02:50:08 +03:00
|
|
|
goto illegal_op;
|
2023-06-19 16:41:42 +03:00
|
|
|
}
|
2021-05-14 18:12:58 +03:00
|
|
|
if (!PE(s)) {
|
2021-05-14 18:12:53 +03:00
|
|
|
gen_exception_gpf(s);
|
2004-05-29 15:08:52 +04:00
|
|
|
} else {
|
2023-09-14 02:37:36 +03:00
|
|
|
gen_helper_sysenter(tcg_env);
|
2022-10-01 17:09:19 +03:00
|
|
|
s->base.is_jmp = DISAS_EOB_ONLY;
|
2004-05-29 15:08:52 +04:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
case 0x135: /* sysexit */
|
2023-06-19 16:41:42 +03:00
|
|
|
/* For AMD SYSEXIT is not valid in long mode */
|
|
|
|
if (LMA(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1) {
|
2005-01-04 02:50:08 +03:00
|
|
|
goto illegal_op;
|
2023-06-19 16:41:42 +03:00
|
|
|
}
|
2023-06-19 16:29:12 +03:00
|
|
|
if (!PE(s) || CPL(s) != 0) {
|
2021-05-14 18:12:53 +03:00
|
|
|
gen_exception_gpf(s);
|
2004-05-29 15:08:52 +04:00
|
|
|
} else {
|
2023-09-14 02:37:36 +03:00
|
|
|
gen_helper_sysexit(tcg_env, tcg_constant_i32(dflag - 1));
|
2022-10-01 17:09:19 +03:00
|
|
|
s->base.is_jmp = DISAS_EOB_ONLY;
|
2004-05-29 15:08:52 +04:00
|
|
|
}
|
|
|
|
break;
|
2005-01-04 02:50:08 +03:00
|
|
|
case 0x105: /* syscall */
|
2023-06-19 16:41:42 +03:00
|
|
|
/* For Intel SYSCALL is only valid in long mode */
|
|
|
|
if (!LMA(s) && env->cpuid_vendor1 == CPUID_VENDOR_INTEL_1) {
|
|
|
|
goto illegal_op;
|
|
|
|
}
|
2010-07-25 07:30:03 +04:00
|
|
|
gen_update_cc_op(s);
|
2022-10-01 17:09:14 +03:00
|
|
|
gen_update_eip_cur(s);
|
2023-09-14 02:37:36 +03:00
|
|
|
gen_helper_syscall(tcg_env, cur_insn_len_i32(s));
|
2024-05-17 00:08:40 +03:00
|
|
|
/* condition codes are modified only in long mode */
|
|
|
|
if (LMA(s)) {
|
|
|
|
assume_cc_op(s, CC_OP_EFLAGS);
|
|
|
|
}
|
2016-12-24 23:29:33 +03:00
|
|
|
/* TF handling for the syscall insn is different. The TF bit is checked
|
|
|
|
after the syscall insn completes. This allows #DB to not be
|
|
|
|
generated after one has entered CPL0 if TF is set in FMASK. */
|
2024-05-16 19:46:55 +03:00
|
|
|
s->base.is_jmp = DISAS_EOB_RECHECK_TF;
|
2005-01-04 02:50:08 +03:00
|
|
|
break;
|
|
|
|
case 0x107: /* sysret */
|
2023-06-19 16:41:42 +03:00
|
|
|
/* For Intel SYSRET is only valid in long mode */
|
|
|
|
if (!LMA(s) && env->cpuid_vendor1 == CPUID_VENDOR_INTEL_1) {
|
|
|
|
goto illegal_op;
|
|
|
|
}
|
2023-06-19 16:29:12 +03:00
|
|
|
if (!PE(s) || CPL(s) != 0) {
|
2021-05-14 18:12:53 +03:00
|
|
|
gen_exception_gpf(s);
|
2005-01-04 02:50:08 +03:00
|
|
|
} else {
|
2023-09-14 02:37:36 +03:00
|
|
|
gen_helper_sysret(tcg_env, tcg_constant_i32(dflag - 1));
|
2005-04-23 21:53:12 +04:00
|
|
|
/* condition codes are modified only in long mode */
|
2021-05-14 18:13:05 +03:00
|
|
|
if (LMA(s)) {
|
2024-05-17 00:08:40 +03:00
|
|
|
assume_cc_op(s, CC_OP_EFLAGS);
|
2013-01-24 00:30:52 +04:00
|
|
|
}
|
2016-12-07 02:06:30 +03:00
|
|
|
/* TF handling for the sysret insn is different. The TF bit is
|
|
|
|
checked after the sysret insn completes. This allows #DB to be
|
|
|
|
generated "as if" the syscall insn in userspace has just
|
|
|
|
completed. */
|
2024-05-16 19:46:55 +03:00
|
|
|
s->base.is_jmp = DISAS_EOB_RECHECK_TF;
|
2005-01-04 02:50:08 +03:00
|
|
|
}
|
|
|
|
break;
|
2003-10-01 00:34:21 +04:00
|
|
|
case 0x1a2: /* cpuid */
|
2013-01-24 00:43:12 +04:00
|
|
|
gen_update_cc_op(s);
|
2022-10-01 17:09:14 +03:00
|
|
|
gen_update_eip_cur(s);
|
2023-09-14 02:37:36 +03:00
|
|
|
gen_helper_cpuid(tcg_env);
|
2003-10-01 00:34:21 +04:00
|
|
|
break;
|
|
|
|
case 0x100:
|
2017-04-26 14:59:34 +03:00
|
|
|
modrm = x86_ldub_code(env, s);
|
2003-10-01 00:34:21 +04:00
|
|
|
mod = (modrm >> 6) & 3;
|
|
|
|
op = (modrm >> 3) & 7;
|
|
|
|
switch(op) {
|
|
|
|
case 0: /* sldt */
|
2021-05-14 18:13:01 +03:00
|
|
|
if (!PE(s) || VM86(s))
|
2003-11-13 04:43:28 +03:00
|
|
|
goto illegal_op;
|
2022-02-07 01:36:09 +03:00
|
|
|
if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
|
|
|
|
break;
|
|
|
|
}
|
2021-05-14 18:13:29 +03:00
|
|
|
gen_svm_check_intercept(s, SVM_EXIT_LDTR_READ);
|
2023-09-14 02:37:36 +03:00
|
|
|
tcg_gen_ld32u_tl(s->T0, tcg_env,
|
2015-07-09 10:15:22 +03:00
|
|
|
offsetof(CPUX86State, ldt.selector));
|
2013-11-06 03:37:57 +04:00
|
|
|
ot = mod == 3 ? dflag : MO_16;
|
2012-09-08 17:26:02 +04:00
|
|
|
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
|
2003-10-01 00:34:21 +04:00
|
|
|
break;
|
|
|
|
case 2: /* lldt */
|
2021-05-14 18:13:01 +03:00
|
|
|
if (!PE(s) || VM86(s))
|
2003-11-13 04:43:28 +03:00
|
|
|
goto illegal_op;
|
2021-05-14 18:12:54 +03:00
|
|
|
if (check_cpl0(s)) {
|
2021-05-14 18:13:29 +03:00
|
|
|
gen_svm_check_intercept(s, SVM_EXIT_LDTR_WRITE);
|
2013-11-02 20:54:47 +04:00
|
|
|
gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
|
2018-09-11 21:17:18 +03:00
|
|
|
tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
|
2023-09-14 02:37:36 +03:00
|
|
|
gen_helper_lldt(tcg_env, s->tmp2_i32);
|
2003-10-01 00:34:21 +04:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
case 1: /* str */
|
2021-05-14 18:13:01 +03:00
|
|
|
if (!PE(s) || VM86(s))
|
2003-11-13 04:43:28 +03:00
|
|
|
goto illegal_op;
|
2022-02-07 01:36:09 +03:00
|
|
|
if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
|
|
|
|
break;
|
|
|
|
}
|
2021-05-14 18:13:29 +03:00
|
|
|
gen_svm_check_intercept(s, SVM_EXIT_TR_READ);
|
2023-09-14 02:37:36 +03:00
|
|
|
tcg_gen_ld32u_tl(s->T0, tcg_env,
|
2015-07-09 10:15:22 +03:00
|
|
|
offsetof(CPUX86State, tr.selector));
|
2013-11-06 03:37:57 +04:00
|
|
|
ot = mod == 3 ? dflag : MO_16;
|
2012-09-08 17:26:02 +04:00
|
|
|
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
|
2003-10-01 00:34:21 +04:00
|
|
|
break;
|
|
|
|
case 3: /* ltr */
|
2021-05-14 18:13:01 +03:00
|
|
|
if (!PE(s) || VM86(s))
|
2003-11-13 04:43:28 +03:00
|
|
|
goto illegal_op;
|
2021-05-14 18:12:54 +03:00
|
|
|
if (check_cpl0(s)) {
|
2021-05-14 18:13:29 +03:00
|
|
|
gen_svm_check_intercept(s, SVM_EXIT_TR_WRITE);
|
2013-11-02 20:54:47 +04:00
|
|
|
gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
|
2018-09-11 21:17:18 +03:00
|
|
|
tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
|
2023-09-14 02:37:36 +03:00
|
|
|
gen_helper_ltr(tcg_env, s->tmp2_i32);
|
2003-10-01 00:34:21 +04:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
case 4: /* verr */
|
|
|
|
case 5: /* verw */
|
2021-05-14 18:13:01 +03:00
|
|
|
if (!PE(s) || VM86(s))
|
2003-11-13 04:43:28 +03:00
|
|
|
goto illegal_op;
|
2013-11-02 20:54:47 +04:00
|
|
|
gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
|
2013-01-24 00:43:12 +04:00
|
|
|
gen_update_cc_op(s);
|
2012-04-29 23:47:06 +04:00
|
|
|
if (op == 4) {
|
2023-09-14 02:37:36 +03:00
|
|
|
gen_helper_verr(tcg_env, s->T0);
|
2012-04-29 23:47:06 +04:00
|
|
|
} else {
|
2023-09-14 02:37:36 +03:00
|
|
|
gen_helper_verw(tcg_env, s->T0);
|
2012-04-29 23:47:06 +04:00
|
|
|
}
|
2024-05-17 00:04:28 +03:00
|
|
|
assume_cc_op(s, CC_OP_EFLAGS);
|
2003-11-13 04:43:28 +03:00
|
|
|
break;
|
2003-10-01 00:34:21 +04:00
|
|
|
default:
|
2016-03-02 03:53:18 +03:00
|
|
|
goto unknown_op;
|
2003-10-01 00:34:21 +04:00
|
|
|
}
|
|
|
|
break;
|
2015-07-02 15:59:21 +03:00
|
|
|
|
2003-10-01 00:34:21 +04:00
|
|
|
case 0x101:
|
2017-04-26 14:59:34 +03:00
|
|
|
modrm = x86_ldub_code(env, s);
|
2015-07-02 15:59:21 +03:00
|
|
|
switch (modrm) {
|
2016-03-01 18:12:14 +03:00
|
|
|
CASE_MODRM_MEM_OP(0): /* sgdt */
|
2022-02-07 01:36:09 +03:00
|
|
|
if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
|
|
|
|
break;
|
|
|
|
}
|
2021-05-14 18:13:29 +03:00
|
|
|
gen_svm_check_intercept(s, SVM_EXIT_GDTR_READ);
|
2013-11-02 22:55:59 +04:00
|
|
|
gen_lea_modrm(env, s, modrm);
|
2018-09-11 21:48:41 +03:00
|
|
|
tcg_gen_ld32u_tl(s->T0,
|
2023-09-14 02:37:36 +03:00
|
|
|
tcg_env, offsetof(CPUX86State, gdt.limit));
|
2018-09-11 21:48:41 +03:00
|
|
|
gen_op_st_v(s, MO_16, s->T0, s->A0);
|
2005-04-23 21:53:12 +04:00
|
|
|
gen_add_A0_im(s, 2);
|
2023-09-14 02:37:36 +03:00
|
|
|
tcg_gen_ld_tl(s->T0, tcg_env, offsetof(CPUX86State, gdt.base));
|
2024-04-19 22:51:47 +03:00
|
|
|
/*
|
|
|
|
* NB: Despite a confusing description in Intel CPU documentation,
|
|
|
|
* all 32-bits are written regardless of operand size.
|
|
|
|
*/
|
2018-09-11 21:48:41 +03:00
|
|
|
gen_op_st_v(s, CODE64(s) + MO_32, s->T0, s->A0);
|
2003-10-01 00:34:21 +04:00
|
|
|
break;
|
2015-07-02 15:59:21 +03:00
|
|
|
|
|
|
|
case 0xc8: /* monitor */
|
2021-05-14 18:12:59 +03:00
|
|
|
if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || CPL(s) != 0) {
|
2015-07-02 15:59:21 +03:00
|
|
|
goto illegal_op;
|
2006-07-10 23:53:04 +04:00
|
|
|
}
|
2015-07-02 15:59:21 +03:00
|
|
|
gen_update_cc_op(s);
|
2022-10-01 17:09:14 +03:00
|
|
|
gen_update_eip_cur(s);
|
2018-09-11 21:41:57 +03:00
|
|
|
tcg_gen_mov_tl(s->A0, cpu_regs[R_EAX]);
|
2015-07-02 15:59:21 +03:00
|
|
|
gen_add_A0_ds_seg(s);
|
2023-09-14 02:37:36 +03:00
|
|
|
gen_helper_monitor(tcg_env, s->A0);
|
2006-07-10 23:53:04 +04:00
|
|
|
break;
|
2015-07-02 15:59:21 +03:00
|
|
|
|
|
|
|
case 0xc9: /* mwait */
|
2021-05-14 18:12:59 +03:00
|
|
|
if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || CPL(s) != 0) {
|
2015-07-02 15:59:21 +03:00
|
|
|
goto illegal_op;
|
|
|
|
}
|
|
|
|
gen_update_cc_op(s);
|
2022-10-01 17:09:14 +03:00
|
|
|
gen_update_eip_cur(s);
|
2023-09-14 02:37:36 +03:00
|
|
|
gen_helper_mwait(tcg_env, cur_insn_len_i32(s));
|
2021-05-14 18:13:26 +03:00
|
|
|
s->base.is_jmp = DISAS_NORETURN;
|
2015-07-02 15:59:21 +03:00
|
|
|
break;
|
|
|
|
|
|
|
|
case 0xca: /* clac */
|
|
|
|
if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
|
2021-05-14 18:12:59 +03:00
|
|
|
|| CPL(s) != 0) {
|
2015-07-02 15:59:21 +03:00
|
|
|
goto illegal_op;
|
|
|
|
}
|
2022-10-24 09:16:30 +03:00
|
|
|
gen_reset_eflags(s, AC_MASK);
|
2022-10-01 17:09:18 +03:00
|
|
|
s->base.is_jmp = DISAS_EOB_NEXT;
|
2015-07-02 15:59:21 +03:00
|
|
|
break;
|
|
|
|
|
|
|
|
case 0xcb: /* stac */
|
|
|
|
if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
|
2021-05-14 18:12:59 +03:00
|
|
|
|| CPL(s) != 0) {
|
2015-07-02 15:59:21 +03:00
|
|
|
goto illegal_op;
|
|
|
|
}
|
2022-10-24 09:16:30 +03:00
|
|
|
gen_set_eflags(s, AC_MASK);
|
2022-10-01 17:09:18 +03:00
|
|
|
s->base.is_jmp = DISAS_EOB_NEXT;
|
2015-07-02 15:59:21 +03:00
|
|
|
break;
|
|
|
|
|
2016-03-01 18:12:14 +03:00
|
|
|
CASE_MODRM_MEM_OP(1): /* sidt */
|
2022-02-07 01:36:09 +03:00
|
|
|
if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
|
|
|
|
break;
|
|
|
|
}
|
2021-05-14 18:13:29 +03:00
|
|
|
gen_svm_check_intercept(s, SVM_EXIT_IDTR_READ);
|
2015-07-02 15:59:21 +03:00
|
|
|
gen_lea_modrm(env, s, modrm);
|
2023-09-14 02:37:36 +03:00
|
|
|
tcg_gen_ld32u_tl(s->T0, tcg_env, offsetof(CPUX86State, idt.limit));
|
2018-09-11 21:48:41 +03:00
|
|
|
gen_op_st_v(s, MO_16, s->T0, s->A0);
|
2015-07-02 15:59:21 +03:00
|
|
|
gen_add_A0_im(s, 2);
|
2023-09-14 02:37:36 +03:00
|
|
|
tcg_gen_ld_tl(s->T0, tcg_env, offsetof(CPUX86State, idt.base));
|
2024-04-19 22:51:47 +03:00
|
|
|
/*
|
|
|
|
* NB: Despite a confusing description in Intel CPU documentation,
|
|
|
|
* all 32-bits are written regardless of operand size.
|
|
|
|
*/
|
2018-09-11 21:48:41 +03:00
|
|
|
gen_op_st_v(s, CODE64(s) + MO_32, s->T0, s->A0);
|
2015-07-02 15:59:21 +03:00
|
|
|
break;
|
|
|
|
|
2015-07-02 16:53:40 +03:00
|
|
|
case 0xd0: /* xgetbv */
|
|
|
|
if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
|
|
|
|
|| (s->prefix & (PREFIX_LOCK | PREFIX_DATA
|
|
|
|
| PREFIX_REPZ | PREFIX_REPNZ))) {
|
|
|
|
goto illegal_op;
|
|
|
|
}
|
2018-09-11 21:17:18 +03:00
|
|
|
tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
|
2023-09-14 02:37:36 +03:00
|
|
|
gen_helper_xgetbv(s->tmp1_i64, tcg_env, s->tmp2_i32);
|
2018-09-11 21:22:31 +03:00
|
|
|
tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], s->tmp1_i64);
|
2015-07-02 16:53:40 +03:00
|
|
|
break;
|
|
|
|
|
|
|
|
case 0xd1: /* xsetbv */
|
|
|
|
if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
|
|
|
|
|| (s->prefix & (PREFIX_LOCK | PREFIX_DATA
|
|
|
|
| PREFIX_REPZ | PREFIX_REPNZ))) {
|
|
|
|
goto illegal_op;
|
|
|
|
}
|
2023-10-13 10:27:02 +03:00
|
|
|
gen_svm_check_intercept(s, SVM_EXIT_XSETBV);
|
2021-05-14 18:12:54 +03:00
|
|
|
if (!check_cpl0(s)) {
|
2015-07-02 16:53:40 +03:00
|
|
|
break;
|
|
|
|
}
|
2018-09-11 21:22:31 +03:00
|
|
|
tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
|
2015-07-02 16:53:40 +03:00
|
|
|
cpu_regs[R_EDX]);
|
2018-09-11 21:17:18 +03:00
|
|
|
tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
|
2023-09-14 02:37:36 +03:00
|
|
|
gen_helper_xsetbv(tcg_env, s->tmp2_i32, s->tmp1_i64);
|
2015-07-02 16:53:40 +03:00
|
|
|
/* End TB because translation flags may change. */
|
2022-10-01 17:09:18 +03:00
|
|
|
s->base.is_jmp = DISAS_EOB_NEXT;
|
2015-07-02 16:53:40 +03:00
|
|
|
break;
|
|
|
|
|
2015-07-02 15:59:21 +03:00
|
|
|
case 0xd8: /* VMRUN */
|
2021-05-14 18:13:22 +03:00
|
|
|
if (!SVME(s) || !PE(s)) {
|
2015-07-02 15:59:21 +03:00
|
|
|
goto illegal_op;
|
|
|
|
}
|
2021-05-14 18:12:54 +03:00
|
|
|
if (!check_cpl0(s)) {
|
2015-07-02 15:59:21 +03:00
|
|
|
break;
|
2003-10-01 00:34:21 +04:00
|
|
|
}
|
2015-07-02 15:59:21 +03:00
|
|
|
gen_update_cc_op(s);
|
2022-10-01 17:09:14 +03:00
|
|
|
gen_update_eip_cur(s);
|
2023-09-14 02:37:36 +03:00
|
|
|
gen_helper_vmrun(tcg_env, tcg_constant_i32(s->aflag - 1),
|
2022-10-01 17:09:20 +03:00
|
|
|
cur_insn_len_i32(s));
|
2018-05-31 04:06:23 +03:00
|
|
|
tcg_gen_exit_tb(NULL, 0);
|
2017-07-14 11:29:42 +03:00
|
|
|
s->base.is_jmp = DISAS_NORETURN;
|
2003-10-01 00:34:21 +04:00
|
|
|
break;
|
2015-07-02 15:59:21 +03:00
|
|
|
|
|
|
|
case 0xd9: /* VMMCALL */
|
2021-05-14 18:13:22 +03:00
|
|
|
if (!SVME(s)) {
|
2015-07-02 15:59:21 +03:00
|
|
|
goto illegal_op;
|
|
|
|
}
|
|
|
|
gen_update_cc_op(s);
|
2022-10-01 17:09:14 +03:00
|
|
|
gen_update_eip_cur(s);
|
2023-09-14 02:37:36 +03:00
|
|
|
gen_helper_vmmcall(tcg_env);
|
2015-07-02 15:59:21 +03:00
|
|
|
break;
|
|
|
|
|
|
|
|
case 0xda: /* VMLOAD */
|
2021-05-14 18:13:22 +03:00
|
|
|
if (!SVME(s) || !PE(s)) {
|
2015-07-02 15:59:21 +03:00
|
|
|
goto illegal_op;
|
|
|
|
}
|
2021-05-14 18:12:54 +03:00
|
|
|
if (!check_cpl0(s)) {
|
2015-07-02 15:59:21 +03:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
gen_update_cc_op(s);
|
2022-10-01 17:09:14 +03:00
|
|
|
gen_update_eip_cur(s);
|
2023-09-14 02:37:36 +03:00
|
|
|
gen_helper_vmload(tcg_env, tcg_constant_i32(s->aflag - 1));
|
2015-07-02 15:59:21 +03:00
|
|
|
break;
|
|
|
|
|
|
|
|
case 0xdb: /* VMSAVE */
|
2021-05-14 18:13:22 +03:00
|
|
|
if (!SVME(s) || !PE(s)) {
|
2015-07-02 15:59:21 +03:00
|
|
|
goto illegal_op;
|
|
|
|
}
|
2021-05-14 18:12:54 +03:00
|
|
|
if (!check_cpl0(s)) {
|
2015-07-02 15:59:21 +03:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
gen_update_cc_op(s);
|
2022-10-01 17:09:14 +03:00
|
|
|
gen_update_eip_cur(s);
|
2023-09-14 02:37:36 +03:00
|
|
|
gen_helper_vmsave(tcg_env, tcg_constant_i32(s->aflag - 1));
|
2015-07-02 15:59:21 +03:00
|
|
|
break;
|
|
|
|
|
|
|
|
case 0xdc: /* STGI */
|
2021-05-14 18:13:22 +03:00
|
|
|
if ((!SVME(s) && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
|
2021-05-14 18:12:58 +03:00
|
|
|
|| !PE(s)) {
|
2015-07-02 15:59:21 +03:00
|
|
|
goto illegal_op;
|
|
|
|
}
|
2021-05-14 18:12:54 +03:00
|
|
|
if (!check_cpl0(s)) {
|
2015-07-02 15:59:21 +03:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
gen_update_cc_op(s);
|
2023-09-14 02:37:36 +03:00
|
|
|
gen_helper_stgi(tcg_env);
|
2022-10-01 17:09:18 +03:00
|
|
|
s->base.is_jmp = DISAS_EOB_NEXT;
|
2015-07-02 15:59:21 +03:00
|
|
|
break;
|
|
|
|
|
|
|
|
case 0xdd: /* CLGI */
|
2021-05-14 18:13:22 +03:00
|
|
|
if (!SVME(s) || !PE(s)) {
|
2015-07-02 15:59:21 +03:00
|
|
|
goto illegal_op;
|
|
|
|
}
|
2021-05-14 18:12:54 +03:00
|
|
|
if (!check_cpl0(s)) {
|
2015-07-02 15:59:21 +03:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
gen_update_cc_op(s);
|
2022-10-01 17:09:14 +03:00
|
|
|
gen_update_eip_cur(s);
|
2023-09-14 02:37:36 +03:00
|
|
|
gen_helper_clgi(tcg_env);
|
2015-07-02 15:59:21 +03:00
|
|
|
break;
|
|
|
|
|
|
|
|
case 0xde: /* SKINIT */
|
2021-05-14 18:13:22 +03:00
|
|
|
if ((!SVME(s) && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
|
2021-05-14 18:12:58 +03:00
|
|
|
|| !PE(s)) {
|
2015-07-02 15:59:21 +03:00
|
|
|
goto illegal_op;
|
|
|
|
}
|
2021-05-14 18:13:29 +03:00
|
|
|
gen_svm_check_intercept(s, SVM_EXIT_SKINIT);
|
2021-05-14 18:13:24 +03:00
|
|
|
/* If not intercepted, not implemented -- raise #UD. */
|
|
|
|
goto illegal_op;
|
2015-07-02 15:59:21 +03:00
|
|
|
|
|
|
|
case 0xdf: /* INVLPGA */
|
2021-05-14 18:13:22 +03:00
|
|
|
if (!SVME(s) || !PE(s)) {
|
2015-07-02 15:59:21 +03:00
|
|
|
goto illegal_op;
|
|
|
|
}
|
2021-05-14 18:12:54 +03:00
|
|
|
if (!check_cpl0(s)) {
|
2015-07-02 15:59:21 +03:00
|
|
|
break;
|
|
|
|
}
|
2021-05-14 18:13:34 +03:00
|
|
|
gen_svm_check_intercept(s, SVM_EXIT_INVLPGA);
|
|
|
|
if (s->aflag == MO_64) {
|
|
|
|
tcg_gen_mov_tl(s->A0, cpu_regs[R_EAX]);
|
|
|
|
} else {
|
|
|
|
tcg_gen_ext32u_tl(s->A0, cpu_regs[R_EAX]);
|
|
|
|
}
|
2023-09-14 02:37:36 +03:00
|
|
|
gen_helper_flush_page(tcg_env, s->A0);
|
2022-10-01 17:09:18 +03:00
|
|
|
s->base.is_jmp = DISAS_EOB_NEXT;
|
2015-07-02 15:59:21 +03:00
|
|
|
break;
|
|
|
|
|
2016-03-01 18:12:14 +03:00
|
|
|
CASE_MODRM_MEM_OP(2): /* lgdt */
|
2021-05-14 18:12:54 +03:00
|
|
|
if (!check_cpl0(s)) {
|
2015-07-02 15:59:21 +03:00
|
|
|
break;
|
|
|
|
}
|
2021-05-14 18:13:29 +03:00
|
|
|
gen_svm_check_intercept(s, SVM_EXIT_GDTR_WRITE);
|
2015-07-02 15:59:21 +03:00
|
|
|
gen_lea_modrm(env, s, modrm);
|
2018-09-11 21:50:46 +03:00
|
|
|
gen_op_ld_v(s, MO_16, s->T1, s->A0);
|
2015-07-02 15:59:21 +03:00
|
|
|
gen_add_A0_im(s, 2);
|
2018-09-11 21:48:41 +03:00
|
|
|
gen_op_ld_v(s, CODE64(s) + MO_32, s->T0, s->A0);
|
2015-07-02 15:59:21 +03:00
|
|
|
if (dflag == MO_16) {
|
2018-09-11 21:48:41 +03:00
|
|
|
tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
|
2015-07-02 15:59:21 +03:00
|
|
|
}
|
2023-09-14 02:37:36 +03:00
|
|
|
tcg_gen_st_tl(s->T0, tcg_env, offsetof(CPUX86State, gdt.base));
|
|
|
|
tcg_gen_st32_tl(s->T1, tcg_env, offsetof(CPUX86State, gdt.limit));
|
2015-07-02 15:59:21 +03:00
|
|
|
break;
|
|
|
|
|
2016-03-01 18:12:14 +03:00
|
|
|
CASE_MODRM_MEM_OP(3): /* lidt */
|
2021-05-14 18:12:54 +03:00
|
|
|
if (!check_cpl0(s)) {
|
2015-07-02 15:59:21 +03:00
|
|
|
break;
|
|
|
|
}
|
2021-05-14 18:13:29 +03:00
|
|
|
gen_svm_check_intercept(s, SVM_EXIT_IDTR_WRITE);
|
2015-07-02 15:59:21 +03:00
|
|
|
gen_lea_modrm(env, s, modrm);
|
2018-09-11 21:50:46 +03:00
|
|
|
gen_op_ld_v(s, MO_16, s->T1, s->A0);
|
2015-07-02 15:59:21 +03:00
|
|
|
gen_add_A0_im(s, 2);
|
2018-09-11 21:48:41 +03:00
|
|
|
gen_op_ld_v(s, CODE64(s) + MO_32, s->T0, s->A0);
|
2015-07-02 15:59:21 +03:00
|
|
|
if (dflag == MO_16) {
|
2018-09-11 21:48:41 +03:00
|
|
|
tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
|
2015-07-02 15:59:21 +03:00
|
|
|
}
|
2023-09-14 02:37:36 +03:00
|
|
|
tcg_gen_st_tl(s->T0, tcg_env, offsetof(CPUX86State, idt.base));
|
|
|
|
tcg_gen_st32_tl(s->T1, tcg_env, offsetof(CPUX86State, idt.limit));
|
2015-07-02 15:59:21 +03:00
|
|
|
break;
|
|
|
|
|
2016-03-01 18:12:14 +03:00
|
|
|
CASE_MODRM_OP(4): /* smsw */
|
2022-02-07 01:36:09 +03:00
|
|
|
if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
|
|
|
|
break;
|
|
|
|
}
|
2021-05-14 18:13:29 +03:00
|
|
|
gen_svm_check_intercept(s, SVM_EXIT_READ_CR0);
|
2023-09-14 02:37:36 +03:00
|
|
|
tcg_gen_ld_tl(s->T0, tcg_env, offsetof(CPUX86State, cr[0]));
|
2020-06-26 12:53:36 +03:00
|
|
|
/*
|
|
|
|
* In 32-bit mode, the higher 16 bits of the destination
|
|
|
|
* register are undefined. In practice CR0[31:0] is stored
|
|
|
|
* just like in 64-bit mode.
|
|
|
|
*/
|
|
|
|
mod = (modrm >> 6) & 3;
|
|
|
|
ot = (mod != 3 ? MO_16 : s->dflag);
|
2016-03-01 19:59:32 +03:00
|
|
|
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
|
2003-10-01 00:34:21 +04:00
|
|
|
break;
|
2016-02-09 16:14:28 +03:00
|
|
|
case 0xee: /* rdpkru */
|
2024-05-09 16:55:47 +03:00
|
|
|
if (s->prefix & (PREFIX_LOCK | PREFIX_DATA
|
|
|
|
| PREFIX_REPZ | PREFIX_REPNZ)) {
|
2016-02-09 16:14:28 +03:00
|
|
|
goto illegal_op;
|
|
|
|
}
|
2018-09-11 21:17:18 +03:00
|
|
|
tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
|
2023-09-14 02:37:36 +03:00
|
|
|
gen_helper_rdpkru(s->tmp1_i64, tcg_env, s->tmp2_i32);
|
2018-09-11 21:22:31 +03:00
|
|
|
tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], s->tmp1_i64);
|
2016-02-09 16:14:28 +03:00
|
|
|
break;
|
|
|
|
case 0xef: /* wrpkru */
|
2024-05-09 16:55:47 +03:00
|
|
|
if (s->prefix & (PREFIX_LOCK | PREFIX_DATA
|
|
|
|
| PREFIX_REPZ | PREFIX_REPNZ)) {
|
2016-02-09 16:14:28 +03:00
|
|
|
goto illegal_op;
|
|
|
|
}
|
2018-09-11 21:22:31 +03:00
|
|
|
tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
|
2016-02-09 16:14:28 +03:00
|
|
|
cpu_regs[R_EDX]);
|
2018-09-11 21:17:18 +03:00
|
|
|
tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
|
2023-09-14 02:37:36 +03:00
|
|
|
gen_helper_wrpkru(tcg_env, s->tmp2_i32, s->tmp1_i64);
|
2016-02-09 16:14:28 +03:00
|
|
|
break;
|
2021-05-14 18:13:31 +03:00
|
|
|
|
2016-03-01 18:12:14 +03:00
|
|
|
CASE_MODRM_OP(6): /* lmsw */
|
2021-05-14 18:12:54 +03:00
|
|
|
if (!check_cpl0(s)) {
|
2015-07-02 15:59:21 +03:00
|
|
|
break;
|
2003-10-01 00:34:21 +04:00
|
|
|
}
|
2021-05-14 18:13:29 +03:00
|
|
|
gen_svm_check_intercept(s, SVM_EXIT_WRITE_CR0);
|
2015-07-02 15:59:21 +03:00
|
|
|
gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
|
2021-05-14 18:13:31 +03:00
|
|
|
/*
|
|
|
|
* Only the 4 lower bits of CR0 are modified.
|
|
|
|
* PE cannot be set to zero if already set to one.
|
|
|
|
*/
|
2023-09-14 02:37:36 +03:00
|
|
|
tcg_gen_ld_tl(s->T1, tcg_env, offsetof(CPUX86State, cr[0]));
|
2021-05-14 18:13:31 +03:00
|
|
|
tcg_gen_andi_tl(s->T0, s->T0, 0xf);
|
|
|
|
tcg_gen_andi_tl(s->T1, s->T1, ~0xe);
|
|
|
|
tcg_gen_or_tl(s->T0, s->T0, s->T1);
|
2023-09-14 02:37:36 +03:00
|
|
|
gen_helper_write_crN(tcg_env, tcg_constant_i32(0), s->T0);
|
2022-10-01 17:09:18 +03:00
|
|
|
s->base.is_jmp = DISAS_EOB_NEXT;
|
2003-10-01 00:34:21 +04:00
|
|
|
break;
|
2015-07-02 15:59:21 +03:00
|
|
|
|
2016-03-01 18:12:14 +03:00
|
|
|
CASE_MODRM_MEM_OP(7): /* invlpg */
|
2021-05-14 18:12:54 +03:00
|
|
|
if (!check_cpl0(s)) {
|
2015-07-02 15:59:21 +03:00
|
|
|
break;
|
|
|
|
}
|
2021-05-14 18:13:34 +03:00
|
|
|
gen_svm_check_intercept(s, SVM_EXIT_INVLPG);
|
2015-07-02 15:59:21 +03:00
|
|
|
gen_lea_modrm(env, s, modrm);
|
2023-09-14 02:37:36 +03:00
|
|
|
gen_helper_flush_page(tcg_env, s->A0);
|
2022-10-01 17:09:18 +03:00
|
|
|
s->base.is_jmp = DISAS_EOB_NEXT;
|
2015-07-02 15:59:21 +03:00
|
|
|
break;
|
|
|
|
|
|
|
|
case 0xf8: /* swapgs */
|
|
|
|
#ifdef TARGET_X86_64
|
|
|
|
if (CODE64(s)) {
|
2021-05-14 18:12:54 +03:00
|
|
|
if (check_cpl0(s)) {
|
2018-09-11 21:48:41 +03:00
|
|
|
tcg_gen_mov_tl(s->T0, cpu_seg_base[R_GS]);
|
2023-09-14 02:37:36 +03:00
|
|
|
tcg_gen_ld_tl(cpu_seg_base[R_GS], tcg_env,
|
2015-07-02 15:59:21 +03:00
|
|
|
offsetof(CPUX86State, kernelgsbase));
|
2023-09-14 02:37:36 +03:00
|
|
|
tcg_gen_st_tl(s->T0, tcg_env,
|
2015-07-02 15:59:21 +03:00
|
|
|
offsetof(CPUX86State, kernelgsbase));
|
2009-09-19 02:30:49 +04:00
|
|
|
}
|
2015-07-02 15:59:21 +03:00
|
|
|
break;
|
|
|
|
}
|
2015-12-17 22:19:21 +03:00
|
|
|
#endif
|
2015-07-02 15:59:21 +03:00
|
|
|
goto illegal_op;
|
|
|
|
|
|
|
|
case 0xf9: /* rdtscp */
|
|
|
|
if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP)) {
|
|
|
|
goto illegal_op;
|
|
|
|
}
|
|
|
|
gen_update_cc_op(s);
|
2022-10-01 17:09:14 +03:00
|
|
|
gen_update_eip_cur(s);
|
2023-05-23 09:08:01 +03:00
|
|
|
translator_io_start(&s->base);
|
2023-09-14 02:37:36 +03:00
|
|
|
gen_helper_rdtsc(tcg_env);
|
|
|
|
gen_helper_rdpid(s->T0, tcg_env);
|
2023-06-21 01:47:31 +03:00
|
|
|
gen_op_mov_reg_v(s, dflag, R_ECX, s->T0);
|
2003-10-01 00:34:21 +04:00
|
|
|
break;
|
2015-07-02 15:59:21 +03:00
|
|
|
|
2003-10-01 00:34:21 +04:00
|
|
|
default:
|
2016-03-02 03:53:18 +03:00
|
|
|
goto unknown_op;
|
2003-10-01 00:34:21 +04:00
|
|
|
}
|
|
|
|
break;
|
2015-07-02 15:59:21 +03:00
|
|
|
|
2004-01-04 18:21:33 +03:00
|
|
|
case 0x108: /* invd */
|
2023-06-17 00:58:25 +03:00
|
|
|
case 0x109: /* wbinvd; wbnoinvd with REPZ prefix */
|
2021-05-14 18:12:54 +03:00
|
|
|
if (check_cpl0(s)) {
|
2023-06-17 00:57:30 +03:00
|
|
|
gen_svm_check_intercept(s, (b & 1) ? SVM_EXIT_WBINVD : SVM_EXIT_INVD);
|
2004-01-04 18:21:33 +03:00
|
|
|
/* nothing to do */
|
|
|
|
}
|
|
|
|
break;
|
2003-10-01 00:34:21 +04:00
|
|
|
case 0x102: /* lar */
|
|
|
|
case 0x103: /* lsl */
|
2008-05-21 20:25:27 +04:00
|
|
|
{
|
2015-02-13 23:51:55 +03:00
|
|
|
TCGLabel *label1;
|
2008-05-25 21:26:41 +04:00
|
|
|
TCGv t0;
|
2021-05-14 18:13:01 +03:00
|
|
|
if (!PE(s) || VM86(s))
|
2008-05-21 20:25:27 +04:00
|
|
|
goto illegal_op;
|
2013-11-06 03:37:57 +04:00
|
|
|
ot = dflag != MO_16 ? MO_32 : MO_16;
|
2017-04-26 14:59:34 +03:00
|
|
|
modrm = x86_ldub_code(env, s);
|
2021-05-14 18:13:09 +03:00
|
|
|
reg = ((modrm >> 3) & 7) | REX_R(s);
|
2013-11-02 20:54:47 +04:00
|
|
|
gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
|
2023-01-30 03:43:49 +03:00
|
|
|
t0 = tcg_temp_new();
|
2013-01-24 00:43:12 +04:00
|
|
|
gen_update_cc_op(s);
|
2012-04-29 23:47:06 +04:00
|
|
|
if (b == 0x102) {
|
2023-09-14 02:37:36 +03:00
|
|
|
gen_helper_lar(t0, tcg_env, s->T0);
|
2012-04-29 23:47:06 +04:00
|
|
|
} else {
|
2023-09-14 02:37:36 +03:00
|
|
|
gen_helper_lsl(t0, tcg_env, s->T0);
|
2012-04-29 23:47:06 +04:00
|
|
|
}
|
2018-09-11 21:07:57 +03:00
|
|
|
tcg_gen_andi_tl(s->tmp0, cpu_cc_src, CC_Z);
|
2008-05-21 20:25:27 +04:00
|
|
|
label1 = gen_new_label();
|
2018-09-11 21:07:57 +03:00
|
|
|
tcg_gen_brcondi_tl(TCG_COND_EQ, s->tmp0, 0, label1);
|
2018-09-11 23:07:54 +03:00
|
|
|
gen_op_mov_reg_v(s, ot, reg, t0);
|
2008-05-21 20:25:27 +04:00
|
|
|
gen_set_label(label1);
|
2013-01-24 00:30:52 +04:00
|
|
|
set_cc_op(s, CC_OP_EFLAGS);
|
2008-05-21 20:25:27 +04:00
|
|
|
}
|
2003-10-01 00:34:21 +04:00
|
|
|
break;
|
2015-07-06 21:10:23 +03:00
|
|
|
case 0x11a:
|
2017-04-26 14:59:34 +03:00
|
|
|
modrm = x86_ldub_code(env, s);
|
2015-07-06 21:10:23 +03:00
|
|
|
if (s->flags & HF_MPX_EN_MASK) {
|
|
|
|
mod = (modrm >> 6) & 3;
|
2021-05-14 18:13:09 +03:00
|
|
|
reg = ((modrm >> 3) & 7) | REX_R(s);
|
2015-07-06 21:37:00 +03:00
|
|
|
if (prefixes & PREFIX_REPZ) {
|
|
|
|
/* bndcl */
|
|
|
|
if (reg >= 4
|
|
|
|
|| (prefixes & PREFIX_LOCK)
|
|
|
|
|| s->aflag == MO_16) {
|
|
|
|
goto illegal_op;
|
|
|
|
}
|
|
|
|
gen_bndck(env, s, modrm, TCG_COND_LTU, cpu_bndl[reg]);
|
|
|
|
} else if (prefixes & PREFIX_REPNZ) {
|
|
|
|
/* bndcu */
|
|
|
|
if (reg >= 4
|
|
|
|
|| (prefixes & PREFIX_LOCK)
|
|
|
|
|| s->aflag == MO_16) {
|
|
|
|
goto illegal_op;
|
|
|
|
}
|
|
|
|
TCGv_i64 notu = tcg_temp_new_i64();
|
|
|
|
tcg_gen_not_i64(notu, cpu_bndu[reg]);
|
|
|
|
gen_bndck(env, s, modrm, TCG_COND_GTU, notu);
|
|
|
|
} else if (prefixes & PREFIX_DATA) {
|
2015-07-06 21:10:23 +03:00
|
|
|
/* bndmov -- from reg/mem */
|
|
|
|
if (reg >= 4 || s->aflag == MO_16) {
|
|
|
|
goto illegal_op;
|
|
|
|
}
|
|
|
|
if (mod == 3) {
|
|
|
|
int reg2 = (modrm & 7) | REX_B(s);
|
|
|
|
if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) {
|
|
|
|
goto illegal_op;
|
|
|
|
}
|
|
|
|
if (s->flags & HF_MPX_IU_MASK) {
|
|
|
|
tcg_gen_mov_i64(cpu_bndl[reg], cpu_bndl[reg2]);
|
|
|
|
tcg_gen_mov_i64(cpu_bndu[reg], cpu_bndu[reg2]);
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
gen_lea_modrm(env, s, modrm);
|
|
|
|
if (CODE64(s)) {
|
2018-09-11 21:41:57 +03:00
|
|
|
tcg_gen_qemu_ld_i64(cpu_bndl[reg], s->A0,
|
2022-01-07 00:00:51 +03:00
|
|
|
s->mem_index, MO_LEUQ);
|
2018-09-11 21:41:57 +03:00
|
|
|
tcg_gen_addi_tl(s->A0, s->A0, 8);
|
|
|
|
tcg_gen_qemu_ld_i64(cpu_bndu[reg], s->A0,
|
2022-01-07 00:00:51 +03:00
|
|
|
s->mem_index, MO_LEUQ);
|
2015-07-06 21:10:23 +03:00
|
|
|
} else {
|
2018-09-11 21:41:57 +03:00
|
|
|
tcg_gen_qemu_ld_i64(cpu_bndl[reg], s->A0,
|
2015-07-06 21:10:23 +03:00
|
|
|
s->mem_index, MO_LEUL);
|
2018-09-11 21:41:57 +03:00
|
|
|
tcg_gen_addi_tl(s->A0, s->A0, 4);
|
|
|
|
tcg_gen_qemu_ld_i64(cpu_bndu[reg], s->A0,
|
2015-07-06 21:10:23 +03:00
|
|
|
s->mem_index, MO_LEUL);
|
|
|
|
}
|
|
|
|
/* bnd registers are now in-use */
|
|
|
|
gen_set_hflag(s, HF_MPX_IU_MASK);
|
|
|
|
}
|
2015-07-07 16:08:41 +03:00
|
|
|
} else if (mod != 3) {
|
|
|
|
/* bndldx */
|
|
|
|
AddressParts a = gen_lea_modrm_0(env, s, modrm);
|
|
|
|
if (reg >= 4
|
|
|
|
|| (prefixes & PREFIX_LOCK)
|
|
|
|
|| s->aflag == MO_16
|
|
|
|
|| a.base < -1) {
|
|
|
|
goto illegal_op;
|
|
|
|
}
|
|
|
|
if (a.base >= 0) {
|
2018-09-11 21:41:57 +03:00
|
|
|
tcg_gen_addi_tl(s->A0, cpu_regs[a.base], a.disp);
|
2015-07-07 16:08:41 +03:00
|
|
|
} else {
|
2018-09-11 21:41:57 +03:00
|
|
|
tcg_gen_movi_tl(s->A0, 0);
|
2015-07-07 16:08:41 +03:00
|
|
|
}
|
2018-09-11 21:41:57 +03:00
|
|
|
gen_lea_v_seg(s, s->aflag, s->A0, a.def_seg, s->override);
|
2015-07-07 16:08:41 +03:00
|
|
|
if (a.index >= 0) {
|
2018-09-11 21:48:41 +03:00
|
|
|
tcg_gen_mov_tl(s->T0, cpu_regs[a.index]);
|
2015-07-07 16:08:41 +03:00
|
|
|
} else {
|
2018-09-11 21:48:41 +03:00
|
|
|
tcg_gen_movi_tl(s->T0, 0);
|
2015-07-07 16:08:41 +03:00
|
|
|
}
|
|
|
|
if (CODE64(s)) {
|
2023-09-14 02:37:36 +03:00
|
|
|
gen_helper_bndldx64(cpu_bndl[reg], tcg_env, s->A0, s->T0);
|
|
|
|
tcg_gen_ld_i64(cpu_bndu[reg], tcg_env,
|
2015-07-07 16:08:41 +03:00
|
|
|
offsetof(CPUX86State, mmx_t0.MMX_Q(0)));
|
|
|
|
} else {
|
2023-09-14 02:37:36 +03:00
|
|
|
gen_helper_bndldx32(cpu_bndu[reg], tcg_env, s->A0, s->T0);
|
2015-07-07 16:08:41 +03:00
|
|
|
tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndu[reg]);
|
|
|
|
tcg_gen_shri_i64(cpu_bndu[reg], cpu_bndu[reg], 32);
|
|
|
|
}
|
|
|
|
gen_set_hflag(s, HF_MPX_IU_MASK);
|
2015-07-06 21:10:23 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
gen_nop_modrm(env, s, modrm);
|
|
|
|
break;
|
2015-07-09 10:22:46 +03:00
|
|
|
case 0x11b:
|
2017-04-26 14:59:34 +03:00
|
|
|
modrm = x86_ldub_code(env, s);
|
2015-07-09 10:22:46 +03:00
|
|
|
if (s->flags & HF_MPX_EN_MASK) {
|
|
|
|
mod = (modrm >> 6) & 3;
|
2021-05-14 18:13:09 +03:00
|
|
|
reg = ((modrm >> 3) & 7) | REX_R(s);
|
2015-07-09 10:22:46 +03:00
|
|
|
if (mod != 3 && (prefixes & PREFIX_REPZ)) {
|
|
|
|
/* bndmk */
|
|
|
|
if (reg >= 4
|
|
|
|
|| (prefixes & PREFIX_LOCK)
|
|
|
|
|| s->aflag == MO_16) {
|
|
|
|
goto illegal_op;
|
|
|
|
}
|
|
|
|
AddressParts a = gen_lea_modrm_0(env, s, modrm);
|
|
|
|
if (a.base >= 0) {
|
|
|
|
tcg_gen_extu_tl_i64(cpu_bndl[reg], cpu_regs[a.base]);
|
|
|
|
if (!CODE64(s)) {
|
|
|
|
tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndl[reg]);
|
|
|
|
}
|
|
|
|
} else if (a.base == -1) {
|
|
|
|
/* no base register has lower bound of 0 */
|
|
|
|
tcg_gen_movi_i64(cpu_bndl[reg], 0);
|
|
|
|
} else {
|
|
|
|
/* rip-relative generates #ud */
|
|
|
|
goto illegal_op;
|
|
|
|
}
|
2022-09-18 01:43:52 +03:00
|
|
|
tcg_gen_not_tl(s->A0, gen_lea_modrm_1(s, a, false));
|
2015-07-09 10:22:46 +03:00
|
|
|
if (!CODE64(s)) {
|
2018-09-11 21:41:57 +03:00
|
|
|
tcg_gen_ext32u_tl(s->A0, s->A0);
|
2015-07-09 10:22:46 +03:00
|
|
|
}
|
2018-09-11 21:41:57 +03:00
|
|
|
tcg_gen_extu_tl_i64(cpu_bndu[reg], s->A0);
|
2015-07-09 10:22:46 +03:00
|
|
|
/* bnd registers are now in-use */
|
|
|
|
gen_set_hflag(s, HF_MPX_IU_MASK);
|
|
|
|
break;
|
2015-07-06 21:37:00 +03:00
|
|
|
} else if (prefixes & PREFIX_REPNZ) {
|
|
|
|
/* bndcn */
|
|
|
|
if (reg >= 4
|
|
|
|
|| (prefixes & PREFIX_LOCK)
|
|
|
|
|| s->aflag == MO_16) {
|
|
|
|
goto illegal_op;
|
|
|
|
}
|
|
|
|
gen_bndck(env, s, modrm, TCG_COND_GTU, cpu_bndu[reg]);
|
2015-07-06 21:10:23 +03:00
|
|
|
} else if (prefixes & PREFIX_DATA) {
|
|
|
|
/* bndmov -- to reg/mem */
|
|
|
|
if (reg >= 4 || s->aflag == MO_16) {
|
|
|
|
goto illegal_op;
|
|
|
|
}
|
|
|
|
if (mod == 3) {
|
|
|
|
int reg2 = (modrm & 7) | REX_B(s);
|
|
|
|
if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) {
|
|
|
|
goto illegal_op;
|
|
|
|
}
|
|
|
|
if (s->flags & HF_MPX_IU_MASK) {
|
|
|
|
tcg_gen_mov_i64(cpu_bndl[reg2], cpu_bndl[reg]);
|
|
|
|
tcg_gen_mov_i64(cpu_bndu[reg2], cpu_bndu[reg]);
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
gen_lea_modrm(env, s, modrm);
|
|
|
|
if (CODE64(s)) {
|
2018-09-11 21:41:57 +03:00
|
|
|
tcg_gen_qemu_st_i64(cpu_bndl[reg], s->A0,
|
2022-01-07 00:00:51 +03:00
|
|
|
s->mem_index, MO_LEUQ);
|
2018-09-11 21:41:57 +03:00
|
|
|
tcg_gen_addi_tl(s->A0, s->A0, 8);
|
|
|
|
tcg_gen_qemu_st_i64(cpu_bndu[reg], s->A0,
|
2022-01-07 00:00:51 +03:00
|
|
|
s->mem_index, MO_LEUQ);
|
2015-07-06 21:10:23 +03:00
|
|
|
} else {
|
2018-09-11 21:41:57 +03:00
|
|
|
tcg_gen_qemu_st_i64(cpu_bndl[reg], s->A0,
|
2015-07-06 21:10:23 +03:00
|
|
|
s->mem_index, MO_LEUL);
|
2018-09-11 21:41:57 +03:00
|
|
|
tcg_gen_addi_tl(s->A0, s->A0, 4);
|
|
|
|
tcg_gen_qemu_st_i64(cpu_bndu[reg], s->A0,
|
2015-07-06 21:10:23 +03:00
|
|
|
s->mem_index, MO_LEUL);
|
|
|
|
}
|
|
|
|
}
|
2015-07-07 16:08:41 +03:00
|
|
|
} else if (mod != 3) {
|
|
|
|
/* bndstx */
|
|
|
|
AddressParts a = gen_lea_modrm_0(env, s, modrm);
|
|
|
|
if (reg >= 4
|
|
|
|
|| (prefixes & PREFIX_LOCK)
|
|
|
|
|| s->aflag == MO_16
|
|
|
|
|| a.base < -1) {
|
|
|
|
goto illegal_op;
|
|
|
|
}
|
|
|
|
if (a.base >= 0) {
|
2018-09-11 21:41:57 +03:00
|
|
|
tcg_gen_addi_tl(s->A0, cpu_regs[a.base], a.disp);
|
2015-07-07 16:08:41 +03:00
|
|
|
} else {
|
2018-09-11 21:41:57 +03:00
|
|
|
tcg_gen_movi_tl(s->A0, 0);
|
2015-07-07 16:08:41 +03:00
|
|
|
}
|
2018-09-11 21:41:57 +03:00
|
|
|
gen_lea_v_seg(s, s->aflag, s->A0, a.def_seg, s->override);
|
2015-07-07 16:08:41 +03:00
|
|
|
if (a.index >= 0) {
|
2018-09-11 21:48:41 +03:00
|
|
|
tcg_gen_mov_tl(s->T0, cpu_regs[a.index]);
|
2015-07-07 16:08:41 +03:00
|
|
|
} else {
|
2018-09-11 21:48:41 +03:00
|
|
|
tcg_gen_movi_tl(s->T0, 0);
|
2015-07-07 16:08:41 +03:00
|
|
|
}
|
|
|
|
if (CODE64(s)) {
|
2023-09-14 02:37:36 +03:00
|
|
|
gen_helper_bndstx64(tcg_env, s->A0, s->T0,
|
2015-07-07 16:08:41 +03:00
|
|
|
cpu_bndl[reg], cpu_bndu[reg]);
|
|
|
|
} else {
|
2023-09-14 02:37:36 +03:00
|
|
|
gen_helper_bndstx32(tcg_env, s->A0, s->T0,
|
2015-07-07 16:08:41 +03:00
|
|
|
cpu_bndl[reg], cpu_bndu[reg]);
|
|
|
|
}
|
2015-07-09 10:22:46 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
gen_nop_modrm(env, s, modrm);
|
|
|
|
break;
|
2021-05-14 18:13:31 +03:00
|
|
|
|
2003-10-01 00:34:21 +04:00
|
|
|
case 0x120: /* mov reg, crN */
|
|
|
|
case 0x122: /* mov crN, reg */
|
2021-05-14 18:13:31 +03:00
|
|
|
if (!check_cpl0(s)) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
modrm = x86_ldub_code(env, s);
|
|
|
|
/*
|
|
|
|
* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
|
|
|
|
* AMD documentation (24594.pdf) and testing of Intel 386 and 486
|
|
|
|
* processors all show that the mod bits are assumed to be 1's,
|
|
|
|
* regardless of actual values.
|
|
|
|
*/
|
|
|
|
rm = (modrm & 7) | REX_B(s);
|
|
|
|
reg = ((modrm >> 3) & 7) | REX_R(s);
|
|
|
|
switch (reg) {
|
|
|
|
case 0:
|
|
|
|
if ((prefixes & PREFIX_LOCK) &&
|
2009-09-19 02:30:47 +04:00
|
|
|
(s->cpuid_ext3_features & CPUID_EXT3_CR8LEG)) {
|
|
|
|
reg = 8;
|
|
|
|
}
|
2021-05-14 18:13:31 +03:00
|
|
|
break;
|
|
|
|
case 2:
|
|
|
|
case 3:
|
|
|
|
case 4:
|
2021-06-02 06:55:11 +03:00
|
|
|
case 8:
|
2021-05-14 18:13:31 +03:00
|
|
|
break;
|
|
|
|
default:
|
|
|
|
goto unknown_op;
|
|
|
|
}
|
|
|
|
ot = (CODE64(s) ? MO_64 : MO_32);
|
|
|
|
|
2023-05-23 09:08:01 +03:00
|
|
|
translator_io_start(&s->base);
|
2021-05-14 18:13:31 +03:00
|
|
|
if (b & 2) {
|
|
|
|
gen_svm_check_intercept(s, SVM_EXIT_WRITE_CR0 + reg);
|
|
|
|
gen_op_mov_v_reg(s, ot, s->T0, rm);
|
2023-09-14 02:37:36 +03:00
|
|
|
gen_helper_write_crN(tcg_env, tcg_constant_i32(reg), s->T0);
|
2022-10-01 17:09:18 +03:00
|
|
|
s->base.is_jmp = DISAS_EOB_NEXT;
|
2021-05-14 18:13:31 +03:00
|
|
|
} else {
|
|
|
|
gen_svm_check_intercept(s, SVM_EXIT_READ_CR0 + reg);
|
2023-09-14 02:37:36 +03:00
|
|
|
gen_helper_read_crN(s->T0, tcg_env, tcg_constant_i32(reg));
|
2021-05-14 18:13:31 +03:00
|
|
|
gen_op_mov_reg_v(s, ot, rm, s->T0);
|
2003-10-01 00:34:21 +04:00
|
|
|
}
|
|
|
|
break;
|
2021-05-14 18:13:31 +03:00
|
|
|
|
2003-10-01 00:34:21 +04:00
|
|
|
case 0x121: /* mov reg, drN */
|
|
|
|
case 0x123: /* mov drN, reg */
|
2021-05-14 18:12:54 +03:00
|
|
|
if (check_cpl0(s)) {
|
2017-04-26 14:59:34 +03:00
|
|
|
modrm = x86_ldub_code(env, s);
|
2012-08-23 10:24:39 +04:00
|
|
|
/* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
|
|
|
|
* AMD documentation (24594.pdf) and testing of
|
|
|
|
* intel 386 and 486 processors all show that the mod bits
|
|
|
|
* are assumed to be 1's, regardless of actual values.
|
|
|
|
*/
|
2005-01-04 02:50:08 +03:00
|
|
|
rm = (modrm & 7) | REX_B(s);
|
2021-05-14 18:13:09 +03:00
|
|
|
reg = ((modrm >> 3) & 7) | REX_R(s);
|
2005-01-04 02:50:08 +03:00
|
|
|
if (CODE64(s))
|
2013-11-02 20:54:47 +04:00
|
|
|
ot = MO_64;
|
2005-01-04 02:50:08 +03:00
|
|
|
else
|
2013-11-02 20:54:47 +04:00
|
|
|
ot = MO_32;
|
2015-09-15 21:45:13 +03:00
|
|
|
if (reg >= 8) {
|
2003-10-01 00:34:21 +04:00
|
|
|
goto illegal_op;
|
2015-09-15 21:45:13 +03:00
|
|
|
}
|
2003-10-01 00:34:21 +04:00
|
|
|
if (b & 2) {
|
2021-05-14 18:13:29 +03:00
|
|
|
gen_svm_check_intercept(s, SVM_EXIT_WRITE_DR0 + reg);
|
2018-09-11 23:07:54 +03:00
|
|
|
gen_op_mov_v_reg(s, ot, s->T0, rm);
|
2018-09-11 21:17:18 +03:00
|
|
|
tcg_gen_movi_i32(s->tmp2_i32, reg);
|
2023-09-14 02:37:36 +03:00
|
|
|
gen_helper_set_dr(tcg_env, s->tmp2_i32, s->T0);
|
2022-10-01 17:09:18 +03:00
|
|
|
s->base.is_jmp = DISAS_EOB_NEXT;
|
2003-10-01 00:34:21 +04:00
|
|
|
} else {
|
2021-05-14 18:13:29 +03:00
|
|
|
gen_svm_check_intercept(s, SVM_EXIT_READ_DR0 + reg);
|
2018-09-11 21:17:18 +03:00
|
|
|
tcg_gen_movi_i32(s->tmp2_i32, reg);
|
2023-09-14 02:37:36 +03:00
|
|
|
gen_helper_get_dr(s->T0, tcg_env, s->tmp2_i32);
|
2018-09-11 23:07:54 +03:00
|
|
|
gen_op_mov_reg_v(s, ot, rm, s->T0);
|
2003-10-01 00:34:21 +04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case 0x106: /* clts */
|
2021-05-14 18:12:54 +03:00
|
|
|
if (check_cpl0(s)) {
|
2021-05-14 18:13:29 +03:00
|
|
|
gen_svm_check_intercept(s, SVM_EXIT_WRITE_CR0);
|
2023-09-14 02:37:36 +03:00
|
|
|
gen_helper_clts(tcg_env);
|
2004-02-26 02:17:58 +03:00
|
|
|
/* abort block because static cpu state changed */
|
2022-10-01 17:09:18 +03:00
|
|
|
s->base.is_jmp = DISAS_EOB_NEXT;
|
2003-10-01 00:34:21 +04:00
|
|
|
}
|
|
|
|
break;
|
2008-10-04 07:27:44 +04:00
|
|
|
/* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */
|
2005-01-08 21:58:29 +03:00
|
|
|
case 0x1ae:
|
2017-04-26 14:59:34 +03:00
|
|
|
modrm = x86_ldub_code(env, s);
|
2015-11-17 17:08:57 +03:00
|
|
|
switch (modrm) {
|
2016-03-01 18:12:14 +03:00
|
|
|
CASE_MODRM_MEM_OP(0): /* fxsave */
|
2015-11-17 17:08:57 +03:00
|
|
|
if (!(s->cpuid_features & CPUID_FXSR)
|
|
|
|
|| (prefixes & PREFIX_LOCK)) {
|
2005-01-04 02:50:08 +03:00
|
|
|
goto illegal_op;
|
2015-11-17 17:08:57 +03:00
|
|
|
}
|
2009-10-03 00:28:57 +04:00
|
|
|
if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
|
2022-10-01 17:09:12 +03:00
|
|
|
gen_exception(s, EXCP07_PREX);
|
2006-02-04 20:40:20 +03:00
|
|
|
break;
|
|
|
|
}
|
2013-11-02 22:55:59 +04:00
|
|
|
gen_lea_modrm(env, s, modrm);
|
2023-09-14 02:37:36 +03:00
|
|
|
gen_helper_fxsave(tcg_env, s->A0);
|
2005-01-08 21:58:29 +03:00
|
|
|
break;
|
2015-11-17 17:08:57 +03:00
|
|
|
|
2016-03-01 18:12:14 +03:00
|
|
|
CASE_MODRM_MEM_OP(1): /* fxrstor */
|
2015-11-17 17:08:57 +03:00
|
|
|
if (!(s->cpuid_features & CPUID_FXSR)
|
|
|
|
|| (prefixes & PREFIX_LOCK)) {
|
2005-01-04 02:50:08 +03:00
|
|
|
goto illegal_op;
|
2015-11-17 17:08:57 +03:00
|
|
|
}
|
2009-10-03 00:28:57 +04:00
|
|
|
if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
|
2022-10-01 17:09:12 +03:00
|
|
|
gen_exception(s, EXCP07_PREX);
|
2006-02-04 20:40:20 +03:00
|
|
|
break;
|
|
|
|
}
|
2013-11-02 22:55:59 +04:00
|
|
|
gen_lea_modrm(env, s, modrm);
|
2023-09-14 02:37:36 +03:00
|
|
|
gen_helper_fxrstor(tcg_env, s->A0);
|
2005-01-08 21:58:29 +03:00
|
|
|
break;
|
2015-11-17 17:08:57 +03:00
|
|
|
|
2016-03-01 18:12:14 +03:00
|
|
|
CASE_MODRM_MEM_OP(2): /* ldmxcsr */
|
2015-11-17 17:08:57 +03:00
|
|
|
if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK)) {
|
|
|
|
goto illegal_op;
|
|
|
|
}
|
2005-01-08 21:58:29 +03:00
|
|
|
if (s->flags & HF_TS_MASK) {
|
2022-10-01 17:09:12 +03:00
|
|
|
gen_exception(s, EXCP07_PREX);
|
2005-01-08 21:58:29 +03:00
|
|
|
break;
|
2005-01-04 02:50:08 +03:00
|
|
|
}
|
2013-11-02 22:55:59 +04:00
|
|
|
gen_lea_modrm(env, s, modrm);
|
2018-09-11 21:17:18 +03:00
|
|
|
tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0, s->mem_index, MO_LEUL);
|
2023-09-14 02:37:36 +03:00
|
|
|
gen_helper_ldmxcsr(tcg_env, s->tmp2_i32);
|
2005-01-08 21:58:29 +03:00
|
|
|
break;
|
2015-11-17 17:08:57 +03:00
|
|
|
|
2016-03-01 18:12:14 +03:00
|
|
|
CASE_MODRM_MEM_OP(3): /* stmxcsr */
|
2015-11-17 17:08:57 +03:00
|
|
|
if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK)) {
|
2005-01-08 21:58:29 +03:00
|
|
|
goto illegal_op;
|
2015-11-17 17:08:57 +03:00
|
|
|
}
|
|
|
|
if (s->flags & HF_TS_MASK) {
|
2022-10-01 17:09:12 +03:00
|
|
|
gen_exception(s, EXCP07_PREX);
|
2015-11-17 17:08:57 +03:00
|
|
|
break;
|
|
|
|
}
|
2023-09-14 02:37:36 +03:00
|
|
|
gen_helper_update_mxcsr(tcg_env);
|
2015-11-17 17:08:57 +03:00
|
|
|
gen_lea_modrm(env, s, modrm);
|
2023-09-14 02:37:36 +03:00
|
|
|
tcg_gen_ld32u_tl(s->T0, tcg_env, offsetof(CPUX86State, mxcsr));
|
2018-09-11 21:48:41 +03:00
|
|
|
gen_op_st_v(s, MO_32, s->T0, s->A0);
|
2005-01-08 21:58:29 +03:00
|
|
|
break;
|
2015-11-17 17:08:57 +03:00
|
|
|
|
2016-03-01 18:12:14 +03:00
|
|
|
CASE_MODRM_MEM_OP(4): /* xsave */
|
2015-07-02 16:53:40 +03:00
|
|
|
if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
|
|
|
|
|| (prefixes & (PREFIX_LOCK | PREFIX_DATA
|
|
|
|
| PREFIX_REPZ | PREFIX_REPNZ))) {
|
|
|
|
goto illegal_op;
|
|
|
|
}
|
|
|
|
gen_lea_modrm(env, s, modrm);
|
2018-09-11 21:22:31 +03:00
|
|
|
tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
|
2015-07-02 16:53:40 +03:00
|
|
|
cpu_regs[R_EDX]);
|
2023-09-14 02:37:36 +03:00
|
|
|
gen_helper_xsave(tcg_env, s->A0, s->tmp1_i64);
|
2015-07-02 16:53:40 +03:00
|
|
|
break;
|
|
|
|
|
2016-03-01 18:12:14 +03:00
|
|
|
CASE_MODRM_MEM_OP(5): /* xrstor */
|
2015-07-02 16:53:40 +03:00
|
|
|
if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
|
|
|
|
|| (prefixes & (PREFIX_LOCK | PREFIX_DATA
|
|
|
|
| PREFIX_REPZ | PREFIX_REPNZ))) {
|
|
|
|
goto illegal_op;
|
|
|
|
}
|
|
|
|
gen_lea_modrm(env, s, modrm);
|
2018-09-11 21:22:31 +03:00
|
|
|
tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
|
2015-07-02 16:53:40 +03:00
|
|
|
cpu_regs[R_EDX]);
|
2023-09-14 02:37:36 +03:00
|
|
|
gen_helper_xrstor(tcg_env, s->A0, s->tmp1_i64);
|
2015-07-02 17:57:14 +03:00
|
|
|
/* XRSTOR is how MPX is enabled, which changes how
|
|
|
|
we translate. Thus we need to end the TB. */
|
2022-10-01 17:09:18 +03:00
|
|
|
s->base.is_jmp = DISAS_EOB_NEXT;
|
2015-07-02 16:53:40 +03:00
|
|
|
break;
|
|
|
|
|
2016-03-01 18:12:14 +03:00
|
|
|
CASE_MODRM_MEM_OP(6): /* xsaveopt / clwb */
|
2015-11-17 17:08:57 +03:00
|
|
|
if (prefixes & PREFIX_LOCK) {
|
|
|
|
goto illegal_op;
|
|
|
|
}
|
|
|
|
if (prefixes & PREFIX_DATA) {
|
2015-11-05 00:24:45 +03:00
|
|
|
/* clwb */
|
2015-11-17 17:08:57 +03:00
|
|
|
if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLWB)) {
|
2015-11-05 00:24:45 +03:00
|
|
|
goto illegal_op;
|
2015-11-17 17:08:57 +03:00
|
|
|
}
|
2015-11-05 00:24:45 +03:00
|
|
|
gen_nop_modrm(env, s, modrm);
|
2015-07-02 17:21:23 +03:00
|
|
|
} else {
|
|
|
|
/* xsaveopt */
|
|
|
|
if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
|
|
|
|
|| (s->cpuid_xsave_features & CPUID_XSAVE_XSAVEOPT) == 0
|
|
|
|
|| (prefixes & (PREFIX_REPZ | PREFIX_REPNZ))) {
|
|
|
|
goto illegal_op;
|
|
|
|
}
|
|
|
|
gen_lea_modrm(env, s, modrm);
|
2018-09-11 21:22:31 +03:00
|
|
|
tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
|
2015-07-02 17:21:23 +03:00
|
|
|
cpu_regs[R_EDX]);
|
2023-09-14 02:37:36 +03:00
|
|
|
gen_helper_xsaveopt(tcg_env, s->A0, s->tmp1_i64);
|
2015-11-17 17:08:57 +03:00
|
|
|
}
|
2015-07-02 17:21:23 +03:00
|
|
|
break;
|
2015-11-17 17:08:57 +03:00
|
|
|
|
2016-03-01 18:12:14 +03:00
|
|
|
CASE_MODRM_MEM_OP(7): /* clflush / clflushopt */
|
2015-11-17 17:08:57 +03:00
|
|
|
if (prefixes & PREFIX_LOCK) {
|
|
|
|
goto illegal_op;
|
|
|
|
}
|
|
|
|
if (prefixes & PREFIX_DATA) {
|
|
|
|
/* clflushopt */
|
|
|
|
if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLFLUSHOPT)) {
|
|
|
|
goto illegal_op;
|
|
|
|
}
|
2015-11-05 00:24:45 +03:00
|
|
|
} else {
|
2015-11-17 17:08:57 +03:00
|
|
|
/* clflush */
|
|
|
|
if ((s->prefix & (PREFIX_REPZ | PREFIX_REPNZ))
|
|
|
|
|| !(s->cpuid_features & CPUID_CLFLUSH)) {
|
2015-11-05 00:24:45 +03:00
|
|
|
goto illegal_op;
|
2015-11-17 17:08:57 +03:00
|
|
|
}
|
2015-11-05 00:24:45 +03:00
|
|
|
}
|
2015-11-17 17:08:57 +03:00
|
|
|
gen_nop_modrm(env, s, modrm);
|
2015-11-05 00:24:45 +03:00
|
|
|
break;
|
2015-11-17 17:08:57 +03:00
|
|
|
|
2015-11-18 14:55:47 +03:00
|
|
|
case 0xc0 ... 0xc7: /* rdfsbase (f3 0f ae /0) */
|
2017-09-28 20:17:06 +03:00
|
|
|
case 0xc8 ... 0xcf: /* rdgsbase (f3 0f ae /1) */
|
2015-11-18 14:55:47 +03:00
|
|
|
case 0xd0 ... 0xd7: /* wrfsbase (f3 0f ae /2) */
|
2017-09-28 20:17:06 +03:00
|
|
|
case 0xd8 ... 0xdf: /* wrgsbase (f3 0f ae /3) */
|
2015-11-18 14:55:47 +03:00
|
|
|
if (CODE64(s)
|
|
|
|
&& (prefixes & PREFIX_REPZ)
|
|
|
|
&& !(prefixes & PREFIX_LOCK)
|
|
|
|
&& (s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_FSGSBASE)) {
|
|
|
|
TCGv base, treg, src, dst;
|
|
|
|
|
|
|
|
/* Preserve hflags bits by testing CR4 at runtime. */
|
2018-09-11 21:17:18 +03:00
|
|
|
tcg_gen_movi_i32(s->tmp2_i32, CR4_FSGSBASE_MASK);
|
2023-09-14 02:37:36 +03:00
|
|
|
gen_helper_cr4_testbit(tcg_env, s->tmp2_i32);
|
2015-11-18 14:55:47 +03:00
|
|
|
|
|
|
|
base = cpu_seg_base[modrm & 8 ? R_GS : R_FS];
|
|
|
|
treg = cpu_regs[(modrm & 7) | REX_B(s)];
|
|
|
|
|
|
|
|
if (modrm & 0x10) {
|
|
|
|
/* wr*base */
|
|
|
|
dst = base, src = treg;
|
|
|
|
} else {
|
|
|
|
/* rd*base */
|
|
|
|
dst = treg, src = base;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (s->dflag == MO_32) {
|
|
|
|
tcg_gen_ext32u_tl(dst, src);
|
|
|
|
} else {
|
|
|
|
tcg_gen_mov_tl(dst, src);
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
}
|
2016-03-02 03:53:18 +03:00
|
|
|
goto unknown_op;
|
2015-11-18 14:55:47 +03:00
|
|
|
|
2024-05-08 18:44:12 +03:00
|
|
|
case 0xf8 ... 0xff: /* sfence */
|
2016-05-16 12:11:29 +03:00
|
|
|
if (!(s->cpuid_features & CPUID_SSE)
|
|
|
|
|| (prefixes & PREFIX_LOCK)) {
|
|
|
|
goto illegal_op;
|
|
|
|
}
|
2016-07-14 23:20:26 +03:00
|
|
|
tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
|
2016-05-16 12:11:29 +03:00
|
|
|
break;
|
2015-11-17 17:08:57 +03:00
|
|
|
case 0xe8 ... 0xef: /* lfence */
|
2016-07-14 23:20:26 +03:00
|
|
|
if (!(s->cpuid_features & CPUID_SSE)
|
|
|
|
|| (prefixes & PREFIX_LOCK)) {
|
|
|
|
goto illegal_op;
|
|
|
|
}
|
|
|
|
tcg_gen_mb(TCG_MO_LD_LD | TCG_BAR_SC);
|
|
|
|
break;
|
2015-11-17 17:08:57 +03:00
|
|
|
case 0xf0 ... 0xf7: /* mfence */
|
|
|
|
if (!(s->cpuid_features & CPUID_SSE2)
|
|
|
|
|| (prefixes & PREFIX_LOCK)) {
|
|
|
|
goto illegal_op;
|
2005-07-23 21:41:26 +04:00
|
|
|
}
|
2016-07-14 23:20:26 +03:00
|
|
|
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
|
2005-07-23 21:41:26 +04:00
|
|
|
break;
|
2015-11-17 17:08:57 +03:00
|
|
|
|
2005-01-08 21:58:29 +03:00
|
|
|
default:
|
2016-03-02 03:53:18 +03:00
|
|
|
goto unknown_op;
|
2005-01-04 02:50:08 +03:00
|
|
|
}
|
|
|
|
break;
|
2015-11-17 17:08:57 +03:00
|
|
|
|
2006-09-24 22:41:56 +04:00
|
|
|
case 0x1aa: /* rsm */
|
2021-05-14 18:13:29 +03:00
|
|
|
gen_svm_check_intercept(s, SVM_EXIT_RSM);
|
2006-09-24 22:41:56 +04:00
|
|
|
if (!(s->flags & HF_SMM_MASK))
|
|
|
|
goto illegal_op;
|
2021-03-22 16:27:47 +03:00
|
|
|
#ifdef CONFIG_USER_ONLY
|
|
|
|
/* we should not be in SMM mode */
|
|
|
|
g_assert_not_reached();
|
|
|
|
#else
|
2023-09-14 02:37:36 +03:00
|
|
|
gen_helper_rsm(tcg_env);
|
2024-05-17 00:08:40 +03:00
|
|
|
assume_cc_op(s, CC_OP_EFLAGS);
|
2021-03-22 16:27:47 +03:00
|
|
|
#endif /* CONFIG_USER_ONLY */
|
2022-10-01 17:09:19 +03:00
|
|
|
s->base.is_jmp = DISAS_EOB_ONLY;
|
2006-09-24 22:41:56 +04:00
|
|
|
break;
|
2008-10-04 07:27:44 +04:00
|
|
|
case 0x1b8: /* SSE4.2 popcnt */
|
|
|
|
if ((prefixes & (PREFIX_REPZ | PREFIX_LOCK | PREFIX_REPNZ)) !=
|
|
|
|
PREFIX_REPZ)
|
|
|
|
goto illegal_op;
|
|
|
|
if (!(s->cpuid_ext_features & CPUID_EXT_POPCNT))
|
|
|
|
goto illegal_op;
|
|
|
|
|
2017-04-26 14:59:34 +03:00
|
|
|
modrm = x86_ldub_code(env, s);
|
2021-05-14 18:13:09 +03:00
|
|
|
reg = ((modrm >> 3) & 7) | REX_R(s);
|
2008-10-04 07:27:44 +04:00
|
|
|
|
2024-05-09 13:38:10 +03:00
|
|
|
ot = dflag;
|
2012-09-08 17:26:02 +04:00
|
|
|
gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
|
2018-09-11 21:48:41 +03:00
|
|
|
gen_extu(ot, s->T0);
|
|
|
|
tcg_gen_mov_tl(cpu_cc_src, s->T0);
|
|
|
|
tcg_gen_ctpop_tl(s->T0, s->T0);
|
2018-09-11 23:07:54 +03:00
|
|
|
gen_op_mov_reg_v(s, ot, reg, s->T0);
|
2008-10-04 07:32:00 +04:00
|
|
|
|
2016-11-21 14:18:53 +03:00
|
|
|
set_cc_op(s, CC_OP_POPCNT);
|
2008-10-04 07:27:44 +04:00
|
|
|
break;
|
2003-10-01 00:34:21 +04:00
|
|
|
default:
|
2024-04-09 18:31:23 +03:00
|
|
|
g_assert_not_reached();
|
2003-10-01 00:34:21 +04:00
|
|
|
}
|
2023-12-22 19:30:06 +03:00
|
|
|
return;
|
2003-10-01 00:34:21 +04:00
|
|
|
illegal_op:
|
2016-03-02 03:53:18 +03:00
|
|
|
gen_illegal_opcode(s);
|
2023-12-22 19:30:06 +03:00
|
|
|
return;
|
2016-03-02 03:53:18 +03:00
|
|
|
unknown_op:
|
|
|
|
gen_unknown_opcode(env, s);
|
2003-10-01 00:34:21 +04:00
|
|
|
}
|
|
|
|
|
2024-04-09 18:31:23 +03:00
|
|
|
#include "decode-new.h"
|
|
|
|
#include "emit.c.inc"
|
|
|
|
#include "decode-new.c.inc"
|
|
|
|
|
2015-03-05 18:38:48 +03:00
|
|
|
void tcg_x86_init(void)
|
2003-10-01 00:34:21 +04:00
|
|
|
{
|
2013-11-06 10:38:38 +04:00
|
|
|
static const char reg_names[CPU_NB_REGS][4] = {
|
|
|
|
#ifdef TARGET_X86_64
|
|
|
|
[R_EAX] = "rax",
|
|
|
|
[R_EBX] = "rbx",
|
|
|
|
[R_ECX] = "rcx",
|
|
|
|
[R_EDX] = "rdx",
|
|
|
|
[R_ESI] = "rsi",
|
|
|
|
[R_EDI] = "rdi",
|
|
|
|
[R_EBP] = "rbp",
|
|
|
|
[R_ESP] = "rsp",
|
|
|
|
[8] = "r8",
|
|
|
|
[9] = "r9",
|
|
|
|
[10] = "r10",
|
|
|
|
[11] = "r11",
|
|
|
|
[12] = "r12",
|
|
|
|
[13] = "r13",
|
|
|
|
[14] = "r14",
|
|
|
|
[15] = "r15",
|
|
|
|
#else
|
|
|
|
[R_EAX] = "eax",
|
|
|
|
[R_EBX] = "ebx",
|
|
|
|
[R_ECX] = "ecx",
|
|
|
|
[R_EDX] = "edx",
|
|
|
|
[R_ESI] = "esi",
|
|
|
|
[R_EDI] = "edi",
|
|
|
|
[R_EBP] = "ebp",
|
|
|
|
[R_ESP] = "esp",
|
2022-10-01 17:09:33 +03:00
|
|
|
#endif
|
|
|
|
};
|
|
|
|
static const char eip_name[] = {
|
|
|
|
#ifdef TARGET_X86_64
|
|
|
|
"rip"
|
|
|
|
#else
|
|
|
|
"eip"
|
2013-11-06 10:38:38 +04:00
|
|
|
#endif
|
|
|
|
};
|
2015-12-17 22:19:21 +03:00
|
|
|
static const char seg_base_names[6][8] = {
|
|
|
|
[R_CS] = "cs_base",
|
|
|
|
[R_DS] = "ds_base",
|
|
|
|
[R_ES] = "es_base",
|
|
|
|
[R_FS] = "fs_base",
|
|
|
|
[R_GS] = "gs_base",
|
|
|
|
[R_SS] = "ss_base",
|
|
|
|
};
|
2015-07-09 10:22:46 +03:00
|
|
|
static const char bnd_regl_names[4][8] = {
|
|
|
|
"bnd0_lb", "bnd1_lb", "bnd2_lb", "bnd3_lb"
|
|
|
|
};
|
|
|
|
static const char bnd_regu_names[4][8] = {
|
|
|
|
"bnd0_ub", "bnd1_ub", "bnd2_ub", "bnd3_ub"
|
|
|
|
};
|
2013-11-06 10:38:38 +04:00
|
|
|
int i;
|
|
|
|
|
2023-09-14 02:37:36 +03:00
|
|
|
cpu_cc_op = tcg_global_mem_new_i32(tcg_env,
|
2012-03-14 04:38:21 +04:00
|
|
|
offsetof(CPUX86State, cc_op), "cc_op");
|
2023-09-14 02:37:36 +03:00
|
|
|
cpu_cc_dst = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, cc_dst),
|
2008-11-17 17:43:54 +03:00
|
|
|
"cc_dst");
|
2023-09-14 02:37:36 +03:00
|
|
|
cpu_cc_src = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, cc_src),
|
target-i386: optimize flags checking after sub using CC_SRCT
After a comparison or subtraction, the original value of the LHS will
currently be reconstructed using an addition. However, in most cases
it is already available: store it in a temp-local variable and save 1
or 2 TCG ops (2 if the result of the addition needs to be extended).
The temp-local can be declared dead as soon as the cc_op changes again,
or also before the translation block ends because gen_prepare_cc will
always make a copy before returning it. All this magic, plus copy
propagation and dead-code elimination, ensures that the temp local will
(almost) never be spilled.
Example (cmp $0x21,%rax + jbe):
Before After
----------------------------------------------------------------------------
movi_i64 tmp1,$0x21 movi_i64 tmp1,$0x21
movi_i64 cc_src,$0x21 movi_i64 cc_src,$0x21
sub_i64 cc_dst,rax,tmp1 sub_i64 cc_dst,rax,tmp1
add_i64 tmp7,cc_dst,cc_src
movi_i32 cc_op,$0x11 movi_i32 cc_op,$0x11
brcond_i64 tmp7,cc_src,leu,$0x0 discard loc11
brcond_i64 rax,cc_src,leu,$0x0
Before After
----------------------------------------------------------------------------
mov (%r14),%rbp mov (%r14),%rbp
mov %rbp,%rbx mov %rbp,%rbx
sub $0x21,%rbx sub $0x21,%rbx
lea 0x21(%rbx),%r12
movl $0x11,0xa0(%r14) movl $0x11,0xa0(%r14)
movq $0x21,0x90(%r14) movq $0x21,0x90(%r14)
mov %rbx,0x98(%r14) mov %rbx,0x98(%r14)
cmp $0x21,%r12 | cmp $0x21,%rbp
jbe ... jbe ...
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Richard Henderson <rth@twiddle.net>
2013-01-24 03:43:03 +04:00
|
|
|
"cc_src");
|
2023-09-14 02:37:36 +03:00
|
|
|
cpu_cc_src2 = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, cc_src2),
|
2013-01-24 04:03:16 +04:00
|
|
|
"cc_src2");
|
2023-09-14 02:37:36 +03:00
|
|
|
cpu_eip = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, eip), eip_name);
|
2008-05-22 20:11:04 +04:00
|
|
|
|
2013-11-06 10:38:38 +04:00
|
|
|
for (i = 0; i < CPU_NB_REGS; ++i) {
|
2023-09-14 02:37:36 +03:00
|
|
|
cpu_regs[i] = tcg_global_mem_new(tcg_env,
|
2013-11-06 10:38:38 +04:00
|
|
|
offsetof(CPUX86State, regs[i]),
|
|
|
|
reg_names[i]);
|
|
|
|
}
|
2015-08-10 18:27:02 +03:00
|
|
|
|
2015-12-17 22:19:21 +03:00
|
|
|
for (i = 0; i < 6; ++i) {
|
|
|
|
cpu_seg_base[i]
|
2023-09-14 02:37:36 +03:00
|
|
|
= tcg_global_mem_new(tcg_env,
|
2015-12-17 22:19:21 +03:00
|
|
|
offsetof(CPUX86State, segs[i].base),
|
|
|
|
seg_base_names[i]);
|
|
|
|
}
|
|
|
|
|
2015-07-09 10:22:46 +03:00
|
|
|
for (i = 0; i < 4; ++i) {
|
|
|
|
cpu_bndl[i]
|
2023-09-14 02:37:36 +03:00
|
|
|
= tcg_global_mem_new_i64(tcg_env,
|
2015-07-09 10:22:46 +03:00
|
|
|
offsetof(CPUX86State, bnd_regs[i].lb),
|
|
|
|
bnd_regl_names[i]);
|
|
|
|
cpu_bndu[i]
|
2023-09-14 02:37:36 +03:00
|
|
|
= tcg_global_mem_new_i64(tcg_env,
|
2015-07-09 10:22:46 +03:00
|
|
|
offsetof(CPUX86State, bnd_regs[i].ub),
|
|
|
|
bnd_regu_names[i]);
|
|
|
|
}
|
2003-10-01 00:34:21 +04:00
|
|
|
}
|
|
|
|
|
2018-02-20 04:51:58 +03:00
|
|
|
/*
 * Per-translation-block setup: populate the DisasContext from the TB's
 * flags and the CPU's CPUID feature words, and allocate the TCG
 * temporaries used while translating this block.
 */
static void i386_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUX86State *env = cpu_env(cpu);
    uint32_t flags = dc->base.tb->flags;
    uint32_t cflags = tb_cflags(dc->base.tb);
    int cpl = (flags >> HF_CPL_SHIFT) & 3;
    int iopl = (flags >> IOPL_SHIFT) & 3;

    dc->cs_base = dc->base.tb->cs_base;
    dc->pc_save = dc->base.pc_next;
    dc->flags = flags;
#ifndef CONFIG_USER_ONLY
    /* cpl/iopl are only tracked separately for system emulation. */
    dc->cpl = cpl;
    dc->iopl = iopl;
#endif

    /* We make some simplifying assumptions; validate they're correct. */
    g_assert(PE(dc) == ((flags & HF_PE_MASK) != 0));
    g_assert(CPL(dc) == cpl);
    g_assert(IOPL(dc) == iopl);
    g_assert(VM86(dc) == ((flags & HF_VM_MASK) != 0));
    g_assert(CODE32(dc) == ((flags & HF_CS32_MASK) != 0));
    g_assert(CODE64(dc) == ((flags & HF_CS64_MASK) != 0));
    g_assert(SS32(dc) == ((flags & HF_SS32_MASK) != 0));
    g_assert(LMA(dc) == ((flags & HF_LMA_MASK) != 0));
    g_assert(ADDSEG(dc) == ((flags & HF_ADDSEG_MASK) != 0));
    g_assert(SVME(dc) == ((flags & HF_SVME_MASK) != 0));
    g_assert(GUEST(dc) == ((flags & HF_GUEST_MASK) != 0));

    /* Condition codes start unknown and not yet written back. */
    dc->cc_op = CC_OP_DYNAMIC;
    dc->cc_op_dirty = false;
    /* select memory access functions */
    dc->mem_index = cpu_mmu_index(cpu, false);
    /* Cache the CPUID feature words the decoder consults per-insn. */
    dc->cpuid_features = env->features[FEAT_1_EDX];
    dc->cpuid_ext_features = env->features[FEAT_1_ECX];
    dc->cpuid_ext2_features = env->features[FEAT_8000_0001_EDX];
    dc->cpuid_ext3_features = env->features[FEAT_8000_0001_ECX];
    dc->cpuid_7_0_ebx_features = env->features[FEAT_7_0_EBX];
    dc->cpuid_7_0_ecx_features = env->features[FEAT_7_0_ECX];
    dc->cpuid_7_1_eax_features = env->features[FEAT_7_1_EAX];
    dc->cpuid_xsave_features = env->features[FEAT_XSAVE];
    /* Direct-jump chaining is disabled when single-stepping-like flags
       are set or the TB was requested without goto_tb. */
    dc->jmp_opt = !((cflags & CF_NO_GOTO_TB) ||
                    (flags & (HF_RF_MASK | HF_TF_MASK | HF_INHIBIT_IRQ_MASK)));
    /*
     * If jmp_opt, we want to handle each string instruction individually.
     * For icount also disable repz optimization so that each iteration
     * is accounted separately.
     */
    dc->repz_opt = !dc->jmp_opt && !(cflags & CF_USE_ICOUNT);

    /* Scratch temporaries shared by the per-insn translation code. */
    dc->T0 = tcg_temp_new();
    dc->T1 = tcg_temp_new();
    dc->A0 = tcg_temp_new();

    dc->tmp0 = tcg_temp_new();
    dc->tmp1_i64 = tcg_temp_new_i64();
    dc->tmp2_i32 = tcg_temp_new_i32();
    dc->tmp3_i32 = tcg_temp_new_i32();
    dc->tmp4 = tcg_temp_new();
    dc->cc_srcT = tcg_temp_new();
}
|
|
|
|
|
2017-07-14 11:57:57 +03:00
|
|
|
/* TranslatorOps tb_start hook: the i386 frontend needs no extra work at
   the start of a TB beyond init_disas_context, so this is a no-op. */
static void i386_tr_tb_start(DisasContextBase *db, CPUState *cpu)
{
}
|
|
|
|
|
2017-07-14 11:37:46 +03:00
|
|
|
/*
 * Begin translation of one guest instruction: remember rollback points
 * used by i386_tr_translate_insn, then emit the insn_start TCG op with
 * the instruction's pc and the current cc_op.
 */
static void i386_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    /*
     * Saved so that translate_insn can discard a partially emitted
     * instruction (see tcg_remove_ops_after there) and retry.
     */
    dc->prev_insn_start = dc->base.insn_start;
    dc->prev_insn_end = tcg_last_op();

    /* For PC-relative TBs only the offset within the page is recorded. */
    target_ulong insn_pc = dc->base.pc_next;
    if (tb_cflags(dcbase->tb) & CF_PCREL) {
        insn_pc &= ~TARGET_PAGE_MASK;
    }
    tcg_gen_insn_start(insn_pc, dc->cc_op);
}
|
|
|
|
|
2017-07-14 11:45:50 +03:00
|
|
|
/*
 * Translate a single guest instruction.  disas_insn() may bail out with
 * siglongjmp(dc->jmpbuf, n): n == 1 raises #GP, n == 2 undoes the current
 * instruction and ends the TB so it can be retried as the first insn of
 * the next TB.
 */
static void i386_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    /* Snapshot decode state so the longjmp(…, 2) path can restore it. */
    bool orig_cc_op_dirty = dc->cc_op_dirty;
    CCOp orig_cc_op = dc->cc_op;
    target_ulong orig_pc_save = dc->pc_save;

#ifdef TARGET_VSYSCALL_PAGE
    /*
     * Detect entry into the vsyscall page and invoke the syscall.
     */
    if ((dc->base.pc_next & TARGET_PAGE_MASK) == TARGET_VSYSCALL_PAGE) {
        gen_exception(dc, EXCP_VSYSCALL);
        /* Advance past the entry point so the TB makes forward progress. */
        dc->base.pc_next = dc->pc + 1;
        return;
    }
#endif

    switch (sigsetjmp(dc->jmpbuf, 0)) {
    case 0:
        /* Normal path: decode and emit TCG ops for one instruction. */
        disas_insn(dc, cpu);
        break;
    case 1:
        /* Decoder raised #GP (e.g. insn exceeded the 15-byte limit). */
        gen_exception_gpf(dc);
        break;
    case 2:
        /*
         * Decoder asked to retry this insn in a new TB: discard whatever
         * was emitted for it and terminate the current TB just before it.
         */
        /* Restore state that may affect the next instruction. */
        dc->pc = dc->base.pc_next;
        /*
         * TODO: These save/restore can be removed after the table-based
         * decoder is complete; we will be decoding the insn completely
         * before any code generation that might affect these variables.
         */
        dc->cc_op_dirty = orig_cc_op_dirty;
        dc->cc_op = orig_cc_op;
        dc->pc_save = orig_pc_save;
        /* END TODO */
        /* Drop this insn from the TB's accounting and op stream. */
        dc->base.num_insns--;
        tcg_remove_ops_after(dc->prev_insn_end);
        dc->base.insn_start = dc->prev_insn_start;
        dc->base.is_jmp = DISAS_TOO_MANY;
        return;
    default:
        g_assert_not_reached();
    }

    /*
     * Instruction decoding completed (possibly with #GP if the
     * 15-byte boundary was exceeded).
     */
    dc->base.pc_next = dc->pc;
    if (dc->base.is_jmp == DISAS_NEXT) {
        if (dc->flags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK)) {
            /*
             * If single step mode, we generate only one instruction and
             * generate an exception.
             * If irq were inhibited with HF_INHIBIT_IRQ_MASK, we clear
             * the flag and abort the translation to give the irqs a
             * chance to happen.
             */
            dc->base.is_jmp = DISAS_EOB_NEXT;
        } else if (!is_same_page(&dc->base, dc->base.pc_next)) {
            /* Next insn starts on a different page: end the TB here. */
            dc->base.is_jmp = DISAS_TOO_MANY;
        }
    }
}
|
|
|
|
|
2017-07-14 11:49:53 +03:00
|
|
|
/*
 * Emit the code that ends the TB, chosen by how translation stopped
 * (dc->base.is_jmp).
 */
static void i386_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    switch (dc->base.is_jmp) {
    case DISAS_NORETURN:
        /* Exit already generated (exception, etc.); nothing to emit. */
        break;
    case DISAS_TOO_MANY:
        /* TB filled up mid-stream: jump to the next insn (offset 0). */
        gen_update_cc_op(dc);
        gen_jmp_rel_csize(dc, 0, 0);
        break;
    case DISAS_EOB_NEXT:
        /* End of block, continuing at the next insn: sync EIP first. */
        gen_update_eip_cur(dc);
        /* fall through */
    case DISAS_EOB_ONLY:
        gen_eob(dc);
        break;
    case DISAS_EOB_RECHECK_TF:
        /* Syscall-style exit that must re-check the TF flag. */
        gen_eob_syscall(dc);
        break;
    case DISAS_EOB_INHIBIT_IRQ:
        /* End of block with interrupts inhibited for one insn. */
        gen_update_eip_cur(dc);
        gen_eob_inhibit_irq(dc);
        break;
    case DISAS_JUMP:
        /* Indirect jump: EIP already computed dynamically. */
        gen_jr(dc);
        break;
    default:
        g_assert_not_reached();
    }
}
|
|
|
|
|
2017-07-14 11:57:57 +03:00
|
|
|
static const TranslatorOps i386_tr_ops = {
|
|
|
|
.init_disas_context = i386_tr_init_disas_context,
|
|
|
|
.tb_start = i386_tr_tb_start,
|
|
|
|
.insn_start = i386_tr_insn_start,
|
|
|
|
.translate_insn = i386_tr_translate_insn,
|
|
|
|
.tb_stop = i386_tr_tb_stop,
|
|
|
|
};
|
2014-03-31 01:50:30 +04:00
|
|
|
|
2017-07-14 11:57:57 +03:00
|
|
|
/* generate intermediate code for basic block 'tb'. */
|
2023-01-29 04:19:22 +03:00
|
|
|
void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
|
2024-01-19 17:39:58 +03:00
|
|
|
vaddr pc, void *host_pc)
|
2017-07-14 11:57:57 +03:00
|
|
|
{
|
|
|
|
DisasContext dc;
|
2017-07-14 11:53:55 +03:00
|
|
|
|
2022-08-11 23:48:03 +03:00
|
|
|
translator_loop(cpu, tb, max_insns, pc, host_pc, &i386_tr_ops, &dc.base);
|
2003-10-01 00:34:21 +04:00
|
|
|
}
|