#ifndef TARGET_ARM_TRANSLATE_H
#define TARGET_ARM_TRANSLATE_H

#include "cpu.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "exec/exec-all.h"
#include "exec/translator.h"
#include "exec/helper-gen.h"
#include "internals.h"
#include "cpu-features.h"

/* internal defines */

/*
 * Save pc_save across a branch, so that we may restore the value from
 * before the branch at the point the label is emitted.
 */
typedef struct DisasLabel {
    TCGLabel *label;
    target_ulong pc_save;
} DisasLabel;

typedef struct DisasContext {
    DisasContextBase base;
    const ARMISARegisters *isar;

    /* The address of the current instruction being translated. */
    target_ulong pc_curr;
    /*
     * For CF_PCREL, the full value of cpu_pc is not known
     * (although the page offset is known).  For convenience, the
     * translation loop uses the full virtual address that triggered
     * the translation, from base.pc_start through pc_curr.
     * For efficiency, we do not update cpu_pc for every instruction.
     * Instead, pc_save has the value of pc_curr at the time of the
     * last update to cpu_pc, which allows us to compute the addend
     * needed to bring cpu_pc current: pc_curr - pc_save.
     * If cpu_pc now contains the destination of an indirect branch,
     * pc_save contains -1 to indicate that relative updates are no
     * longer possible.
     */
    target_ulong pc_save;
    target_ulong page_start;
    uint32_t insn;
    /* Nonzero if this instruction has been conditionally skipped. */
    int condjmp;
    /* The label that will be jumped to when the instruction is skipped. */
    DisasLabel condlabel;
    /* Thumb-2 conditional execution bits. */
    int condexec_mask;
    int condexec_cond;
    /* M-profile ECI/ICI exception-continuable instruction state */
    int eci;
    /*
     * trans_ functions for insns which are continuable should set this true
     * after decode (ie after any UNDEF checks)
     */
    bool eci_handled;
    int sctlr_b;
    MemOp be_data;
#if !defined(CONFIG_USER_ONLY)
    int user;
#endif
    ARMMMUIdx mmu_idx; /* MMU index to use for normal loads/stores */
    uint8_t tbii;      /* TBI1|TBI0 for insns */
    uint8_t tbid;      /* TBI1|TBI0 for data */
    uint8_t tcma;      /* TCMA1|TCMA0 for MTE */
    bool ns;           /* Use non-secure CPREG bank on access */
    int fp_excp_el;    /* FP exception EL or 0 if enabled */
    int sve_excp_el;   /* SVE exception EL or 0 if enabled */
    int sme_excp_el;   /* SME exception EL or 0 if enabled */
    int vl;            /* current vector length in bytes */
    int svl;           /* current streaming vector length in bytes */
    bool vfp_enabled;  /* FP enabled via FPSCR.EN */
    int vec_len;
    int vec_stride;
    bool v7m_handler_mode;
    bool v8m_secure; /* true if v8M and we're in Secure mode */
    bool v8m_stackcheck; /* true if we need to perform v8M stack limit checks */
    bool v8m_fpccr_s_wrong; /* true if v8M FPCCR.S != v8m_secure */
    bool v7m_new_fp_ctxt_needed; /* ASPEN set but no active FP context */
    bool v7m_lspact; /* FPCCR.LSPACT set */
    /* Immediate value in AArch32 SVC insn; must be set if is_jmp == DISAS_SWI
     * so that top level loop can generate correct syndrome information.
     */
    uint32_t svc_imm;
    int current_el;
    GHashTable *cp_regs;
    uint64_t features; /* CPU features bits */
    bool aarch64;
    bool thumb;
    bool lse2;
    /* Because unallocated encodings generate different exception syndrome
     * information from traps due to FP being disabled, we can't do a single
     * "is fp access disabled" check at a high level in the decode tree.
     * To help in catching bugs where the access check was forgotten in some
     * code path, we set this flag when the access check is done, and assert
     * that it is set at the point where we actually touch the FP regs.
     */
    bool fp_access_checked;
    bool sve_access_checked;
    /* ARMv8 single-step state (this is distinct from the QEMU gdbstub
     * single-step support).
     */
    bool ss_active;
    bool pstate_ss;
    /* True if the insn just emitted was a load-exclusive instruction
     * (necessary for syndrome information for single step exceptions),
     * ie A64 LDX*, LDAX*, A32/T32 LDREX*, LDAEX*.
     */
    bool is_ldex;
    /* True if AccType_UNPRIV should be used for LDTR et al */
    bool unpriv;
    /* True if v8.3-PAuth is active. */
    bool pauth_active;
    /* True if v8.5-MTE access to tags is enabled; index with is_unpriv. */
    bool ata[2];
    /* True if v8.5-MTE tag checks affect the PE; index with is_unpriv. */
    bool mte_active[2];
    /* True with v8.5-BTI and SCTLR_ELx.BT* set. */
    bool bt;
    /* True if any CP15 access is trapped by HSTR_EL2 */
    bool hstr_active;
    /* True if memory operations require alignment */
    bool align_mem;
    /* True if PSTATE.IL is set */
    bool pstate_il;
    /* True if PSTATE.SM is set. */
    bool pstate_sm;
    /* True if PSTATE.ZA is set. */
    bool pstate_za;
    /* True if non-streaming insns should raise an SME Streaming exception. */
    bool sme_trap_nonstreaming;
    /* True if the current instruction is non-streaming. */
    bool is_nonstreaming;
    /* True if MVE insns are definitely not predicated by VPR or LTPSIZE */
    bool mve_no_pred;
    /* True if fine-grained traps are active */
    bool fgt_active;
    /* True if fine-grained trap on SVC is enabled */
    bool fgt_svc;
    /* True if a trap on ERET is enabled (FGT or NV) */
    bool trap_eret;
    /* True if FEAT_LSE2 SCTLR_ELx.nAA is set */
    bool naa;
    /* True if FEAT_NV HCR_EL2.NV is enabled */
    bool nv;
    /* True if NV enabled and HCR_EL2.NV1 is set */
    bool nv1;
    /* True if NV enabled and HCR_EL2.NV2 is set */
    bool nv2;
    /* True if NV2 enabled and NV2 RAM accesses use EL2&0 translation regime */
    bool nv2_mem_e20;
    /* True if NV2 enabled and NV2 RAM accesses are big-endian */
    bool nv2_mem_be;
    /*
     * >= 0, a copy of PSTATE.BTYPE, which will be 0 without v8.5-BTI.
     *  < 0, set by the current instruction.
     */
    int8_t btype;
    /* A copy of cpu->dcz_blocksize. */
    uint8_t dcz_blocksize;
    /* A copy of cpu->gm_blocksize. */
    uint8_t gm_blocksize;
    /* True if the current insn_start has been updated. */
    bool insn_start_updated;
    /* True if this is the AArch32 Secure PL1&0 translation regime */
    bool s_pl1_0;
    /* Bottom two bits of XScale c15_cpar coprocessor access control reg */
    int c15_cpar;
    /* Offset from VNCR_EL2 when FEAT_NV2 redirects this reg to memory */
    uint32_t nv2_redirect_offset;
} DisasContext;

typedef struct DisasCompare {
    TCGCond cond;
    TCGv_i32 value;
} DisasCompare;

/* Share the TCG temporaries common between 32 and 64 bit modes. */
extern TCGv_i32 cpu_NF, cpu_ZF, cpu_CF, cpu_VF;
extern TCGv_i64 cpu_exclusive_addr;
extern TCGv_i64 cpu_exclusive_val;

/*
 * Constant expanders for the decoders.
 */
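
/*
 * Example (illustrative): decodetree (.decode) files refer to these
 * expanders by name using the "!function=" syntax, so that an extracted
 * field is transformed before the trans_ function sees it.  A
 * hypothetical field definition
 *
 *     %rshift  16:5 !function=rsub_32
 *
 * extracts bits [20:16] and passes them through rsub_32(), yielding the
 * "32 - imm" shift count that the instruction encodes.
 */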

static inline int negate(DisasContext *s, int x)
{
    return -x;
}

static inline int plus_1(DisasContext *s, int x)
{
    return x + 1;
}

static inline int plus_2(DisasContext *s, int x)
{
    return x + 2;
}

static inline int plus_12(DisasContext *s, int x)
{
    return x + 12;
}

static inline int times_2(DisasContext *s, int x)
{
    return x * 2;
}

static inline int times_4(DisasContext *s, int x)
{
    return x * 4;
}

static inline int times_8(DisasContext *s, int x)
{
    return x * 8;
}

static inline int times_2_plus_1(DisasContext *s, int x)
{
    return x * 2 + 1;
}

static inline int rsub_64(DisasContext *s, int x)
{
    return 64 - x;
}

static inline int rsub_32(DisasContext *s, int x)
{
    return 32 - x;
}

static inline int rsub_16(DisasContext *s, int x)
{
    return 16 - x;
}

static inline int rsub_8(DisasContext *s, int x)
{
    return 8 - x;
}

static inline int shl_12(DisasContext *s, int x)
{
    return x << 12;
}

static inline int xor_2(DisasContext *s, int x)
{
    return x ^ 2;
}

static inline int neon_3same_fp_size(DisasContext *s, int x)
{
    /* Convert 0==fp32, 1==fp16 into a MO_* value */
    return MO_32 - x;
}

static inline int arm_dc_feature(DisasContext *dc, int feature)
{
    return (dc->features & (1ULL << feature)) != 0;
}
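
/*
 * Example (illustrative): a decoder typically uses arm_dc_feature() as an
 * early guard in a trans_ function, e.g.
 *
 *     if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
 *         return false;
 *     }
 *
 * ARM_FEATURE_NEON stands in for whichever feature bit the insn requires.
 */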

static inline int get_mem_index(DisasContext *s)
{
    return arm_to_core_mmu_idx(s->mmu_idx);
}

static inline void disas_set_insn_syndrome(DisasContext *s, uint32_t syn)
{
    /* We don't need to save all of the syndrome so we mask and shift
     * out unneeded bits to help the sleb128 encoder do a better job.
     */
    syn &= ARM_INSN_START_WORD2_MASK;
    syn >>= ARM_INSN_START_WORD2_SHIFT;

    /* Check for multiple updates. */
    assert(!s->insn_start_updated);
    s->insn_start_updated = true;
    tcg_set_insn_start_param(s->base.insn_start, 2, syn);
}

static inline int curr_insn_len(DisasContext *s)
{
    return s->base.pc_next - s->pc_curr;
}

/* is_jmp field values */
#define DISAS_JUMP      DISAS_TARGET_0 /* only pc was modified dynamically */
/* CPU state was modified dynamically; exit to main loop for interrupts. */
#define DISAS_UPDATE_EXIT  DISAS_TARGET_1
/* These instructions trap after executing, so the A32/T32 decoder must
 * defer them until after the conditional execution state has been updated.
 * WFI also needs special handling when single-stepping.
 */
#define DISAS_WFI       DISAS_TARGET_2
#define DISAS_SWI       DISAS_TARGET_3
/* WFE */
#define DISAS_WFE       DISAS_TARGET_4
#define DISAS_HVC       DISAS_TARGET_5
#define DISAS_SMC       DISAS_TARGET_6
#define DISAS_YIELD     DISAS_TARGET_7
/* M profile branch which might be an exception return (and so needs
 * custom end-of-TB code)
 */
#define DISAS_BX_EXCRET DISAS_TARGET_8
/*
 * For instructions which want an immediate exit to the main loop, as opposed
 * to attempting to use lookup_and_goto_ptr.  Unlike DISAS_UPDATE_EXIT, this
 * doesn't write the PC on exiting the translation loop so you need to ensure
 * something (gen_a64_update_pc or runtime helper) has done so before we reach
 * return from cpu_tb_exec.
 */
#define DISAS_EXIT      DISAS_TARGET_9
/* CPU state was modified dynamically; no need to exit, but do not chain. */
#define DISAS_UPDATE_NOCHAIN  DISAS_TARGET_10

#ifdef TARGET_AARCH64
void a64_translate_init(void);
void gen_a64_update_pc(DisasContext *s, target_long diff);
extern const TranslatorOps aarch64_translator_ops;
#else
static inline void a64_translate_init(void)
{
}

static inline void gen_a64_update_pc(DisasContext *s, target_long diff)
{
}
#endif

void arm_test_cc(DisasCompare *cmp, int cc);
void arm_jump_cc(DisasCompare *cmp, TCGLabel *label);
void arm_gen_test_cc(int cc, TCGLabel *label);
MemOp pow2_align(unsigned i);
void unallocated_encoding(DisasContext *s);
void gen_exception_insn_el(DisasContext *s, target_long pc_diff, int excp,
                           uint32_t syn, uint32_t target_el);
void gen_exception_insn(DisasContext *s, target_long pc_diff,
                        int excp, uint32_t syn);

/* Return state of Alternate Half-precision flag, caller frees result */
static inline TCGv_i32 get_ahp_flag(void)
{
    TCGv_i32 ret = tcg_temp_new_i32();

    tcg_gen_ld_i32(ret, tcg_env, offsetoflow32(CPUARMState, vfp.fpcr));
    tcg_gen_extract_i32(ret, ret, 26, 1);

    return ret;
}

/* Set bits within PSTATE. */
static inline void set_pstate_bits(uint32_t bits)
{
    TCGv_i32 p = tcg_temp_new_i32();

    tcg_debug_assert(!(bits & CACHED_PSTATE_BITS));

    tcg_gen_ld_i32(p, tcg_env, offsetof(CPUARMState, pstate));
    tcg_gen_ori_i32(p, p, bits);
    tcg_gen_st_i32(p, tcg_env, offsetof(CPUARMState, pstate));
}

/* Clear bits within PSTATE. */
static inline void clear_pstate_bits(uint32_t bits)
{
    TCGv_i32 p = tcg_temp_new_i32();

    tcg_debug_assert(!(bits & CACHED_PSTATE_BITS));

    tcg_gen_ld_i32(p, tcg_env, offsetof(CPUARMState, pstate));
    tcg_gen_andi_i32(p, p, ~bits);
    tcg_gen_st_i32(p, tcg_env, offsetof(CPUARMState, pstate));
}

/* If the singlestep state is Active-not-pending, advance to Active-pending. */
static inline void gen_ss_advance(DisasContext *s)
{
    if (s->ss_active) {
        s->pstate_ss = 0;
        clear_pstate_bits(PSTATE_SS);
    }
}

/* Generate an architectural singlestep exception */
static inline void gen_swstep_exception(DisasContext *s, int isv, int ex)
{
    /* Fill in the same_el field of the syndrome in the helper. */
    uint32_t syn = syn_swstep(false, isv, ex);
    gen_helper_exception_swstep(tcg_env, tcg_constant_i32(syn));
}

/*
 * Given a VFP floating point constant encoded into an 8 bit immediate in an
 * instruction, expand it to the actual constant value of the specified
 * size, as per the VFPExpandImm() pseudocode in the Arm ARM.
 */
uint64_t vfp_expand_imm(int size, uint8_t imm8);

static inline void gen_vfp_absh(TCGv_i32 d, TCGv_i32 s)
{
    tcg_gen_andi_i32(d, s, INT16_MAX);
}

static inline void gen_vfp_abss(TCGv_i32 d, TCGv_i32 s)
{
    tcg_gen_andi_i32(d, s, INT32_MAX);
}

static inline void gen_vfp_absd(TCGv_i64 d, TCGv_i64 s)
{
    tcg_gen_andi_i64(d, s, INT64_MAX);
}

static inline void gen_vfp_negh(TCGv_i32 d, TCGv_i32 s)
{
    tcg_gen_xori_i32(d, s, 1u << 15);
}

static inline void gen_vfp_negs(TCGv_i32 d, TCGv_i32 s)
{
    tcg_gen_xori_i32(d, s, 1u << 31);
}

static inline void gen_vfp_negd(TCGv_i64 d, TCGv_i64 s)
{
    tcg_gen_xori_i64(d, s, 1ull << 63);
}

/* Vector operations shared between ARM and AArch64. */
void gen_gvec_ceq0(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                   uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_clt0(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                   uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_cgt0(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                   uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_cle0(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                   uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_cge0(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                   uint32_t opr_sz, uint32_t max_sz);

void gen_gvec_mla(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                  uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_mls(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                  uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);

void gen_gvec_cmtst(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                    uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_sshl(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                   uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_ushl(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                   uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_srshl(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                    uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_urshl(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                    uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_neon_sqshl(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                    uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_neon_uqshl(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                    uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_neon_sqrshl(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                     uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_neon_uqrshl(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                     uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);

void gen_gvec_shadd(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                    uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_uhadd(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                    uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_shsub(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                    uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_uhsub(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                    uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_srhadd(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                     uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_urhadd(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                     uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);

void gen_cmtst_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b);
void gen_ushl_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b);
void gen_sshl_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b);
void gen_ushl_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b);
void gen_sshl_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b);

void gen_uqadd_bhs(TCGv_i64 res, TCGv_i64 qc,
                   TCGv_i64 a, TCGv_i64 b, MemOp esz);
void gen_uqadd_d(TCGv_i64 d, TCGv_i64 q, TCGv_i64 a, TCGv_i64 b);
void gen_gvec_uqadd_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                       uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);

void gen_sqadd_bhs(TCGv_i64 res, TCGv_i64 qc,
                   TCGv_i64 a, TCGv_i64 b, MemOp esz);
void gen_sqadd_d(TCGv_i64 d, TCGv_i64 q, TCGv_i64 a, TCGv_i64 b);
void gen_gvec_sqadd_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                       uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);

void gen_uqsub_bhs(TCGv_i64 res, TCGv_i64 qc,
                   TCGv_i64 a, TCGv_i64 b, MemOp esz);
void gen_uqsub_d(TCGv_i64 d, TCGv_i64 q, TCGv_i64 a, TCGv_i64 b);
void gen_gvec_uqsub_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                       uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);

void gen_sqsub_bhs(TCGv_i64 res, TCGv_i64 qc,
                   TCGv_i64 a, TCGv_i64 b, MemOp esz);
void gen_sqsub_d(TCGv_i64 d, TCGv_i64 q, TCGv_i64 a, TCGv_i64 b);
void gen_gvec_sqsub_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                       uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);

void gen_gvec_ssra(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                   int64_t shift, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_usra(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                   int64_t shift, uint32_t opr_sz, uint32_t max_sz);

void gen_srshr32_i32(TCGv_i32 d, TCGv_i32 a, int32_t sh);
void gen_srshr64_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh);
void gen_urshr32_i32(TCGv_i32 d, TCGv_i32 a, int32_t sh);
void gen_urshr64_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh);

void gen_gvec_srshr(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                    int64_t shift, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_urshr(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                    int64_t shift, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_srsra(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                    int64_t shift, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_ursra(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                    int64_t shift, uint32_t opr_sz, uint32_t max_sz);

void gen_gvec_sri(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                  int64_t shift, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_sli(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                  int64_t shift, uint32_t opr_sz, uint32_t max_sz);

void gen_gvec_sqdmulh_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                         uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_sqrdmulh_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                          uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_sqrdmlah_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                          uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_sqrdmlsh_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                          uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);

void gen_gvec_sabd(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                   uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_uabd(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                   uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);

void gen_gvec_saba(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                   uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_uaba(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                   uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);

void gen_gvec_addp(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                   uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_smaxp(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                    uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_sminp(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                    uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_umaxp(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                    uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_uminp(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                    uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);

/*
 * Forward to the isar_feature_* tests given a DisasContext pointer.
 */
#define dc_isar_feature(name, ctx) \
    ({ DisasContext *ctx_ = (ctx); isar_feature_##name(ctx_->isar); })
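
/*
 * Example (illustrative): ID-register-based checks use this macro rather
 * than arm_dc_feature(), e.g. a trans_ function might begin with
 *
 *     if (!dc_isar_feature(aa32_vcma, s)) {
 *         return false;
 *     }
 *
 * where aa32_vcma names one of the isar_feature_* predicates.
 */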

/* Note that the gvec expanders operate on offsets + sizes. */
typedef void GVecGen2Fn(unsigned, uint32_t, uint32_t, uint32_t, uint32_t);
typedef void GVecGen2iFn(unsigned, uint32_t, uint32_t, int64_t,
                         uint32_t, uint32_t);
typedef void GVecGen3Fn(unsigned, uint32_t, uint32_t,
                        uint32_t, uint32_t, uint32_t);
typedef void GVecGen4Fn(unsigned, uint32_t, uint32_t, uint32_t,
                        uint32_t, uint32_t, uint32_t);
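
/*
 * Example (illustrative): a three-operand expander such as gen_gvec_mla
 * matches GVecGen3Fn and is called with register-file offsets and sizes
 * rather than register numbers.  A sketch, assuming an offset helper such
 * as neon_full_reg_offset() from the translator sources:
 *
 *     gen_gvec_mla(MO_32,
 *                  neon_full_reg_offset(rd),
 *                  neon_full_reg_offset(rn),
 *                  neon_full_reg_offset(rm),
 *                  oprsz, maxsz);
 */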

/* Function prototype for gen_ functions for calling Neon helpers */
typedef void NeonGenOneOpFn(TCGv_i32, TCGv_i32);
typedef void NeonGenOneOpEnvFn(TCGv_i32, TCGv_ptr, TCGv_i32);
typedef void NeonGenTwoOpFn(TCGv_i32, TCGv_i32, TCGv_i32);
typedef void NeonGenTwoOpEnvFn(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32);
typedef void NeonGenThreeOpEnvFn(TCGv_i32, TCGv_env, TCGv_i32,
                                 TCGv_i32, TCGv_i32);
typedef void NeonGenTwo64OpFn(TCGv_i64, TCGv_i64, TCGv_i64);
typedef void NeonGenTwo64OpEnvFn(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i64);
typedef void NeonGenNarrowFn(TCGv_i32, TCGv_i64);
typedef void NeonGenNarrowEnvFn(TCGv_i32, TCGv_ptr, TCGv_i64);
typedef void NeonGenWidenFn(TCGv_i64, TCGv_i32);
typedef void NeonGenTwoOpWidenFn(TCGv_i64, TCGv_i32, TCGv_i32);
typedef void NeonGenOneSingleOpFn(TCGv_i32, TCGv_i32, TCGv_ptr);
typedef void NeonGenTwoSingleOpFn(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_ptr);
typedef void NeonGenTwoDoubleOpFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_ptr);
typedef void NeonGenOne64OpFn(TCGv_i64, TCGv_i64);
typedef void CryptoTwoOpFn(TCGv_ptr, TCGv_ptr);
typedef void CryptoThreeOpIntFn(TCGv_ptr, TCGv_ptr, TCGv_i32);
typedef void CryptoThreeOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr);
typedef void AtomicThreeOpFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGArg, MemOp);
typedef void WideShiftImmFn(TCGv_i64, TCGv_i64, int64_t shift);
typedef void WideShiftFn(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i32);
typedef void ShiftImmFn(TCGv_i32, TCGv_i32, int32_t shift);
typedef void ShiftFn(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32);

/**
 * arm_tbflags_from_tb:
 * @tb: the TranslationBlock
 *
 * Extract the flag values from @tb.
 */
static inline CPUARMTBFlags arm_tbflags_from_tb(const TranslationBlock *tb)
{
    return (CPUARMTBFlags){ tb->flags, tb->cs_base };
}

/*
 * Enum for argument to fpstatus_ptr().
 */
typedef enum ARMFPStatusFlavour {
    FPST_FPCR,
    FPST_FPCR_F16,
    FPST_STD,
    FPST_STD_F16,
} ARMFPStatusFlavour;

/**
 * fpstatus_ptr: return TCGv_ptr to the specified fp_status field
 *
 * We have multiple softfloat float_status fields in the Arm CPU state struct
 * (see the comment in cpu.h for details). Return a TCGv_ptr which has
 * been set up to point to the requested field in the CPU state struct.
 * The options are:
 *
 * FPST_FPCR
 *   for non-FP16 operations controlled by the FPCR
 * FPST_FPCR_F16
 *   for operations controlled by the FPCR where FPCR.FZ16 is to be used
 * FPST_STD
 *   for A32/T32 Neon operations using the "standard FPSCR value"
 * FPST_STD_F16
 *   as FPST_STD, but where FPCR.FZ16 is to be used
 */
static inline TCGv_ptr fpstatus_ptr(ARMFPStatusFlavour flavour)
{
    TCGv_ptr statusptr = tcg_temp_new_ptr();
    int offset;

    switch (flavour) {
    case FPST_FPCR:
        offset = offsetof(CPUARMState, vfp.fp_status);
        break;
    case FPST_FPCR_F16:
        offset = offsetof(CPUARMState, vfp.fp_status_f16);
        break;
    case FPST_STD:
        offset = offsetof(CPUARMState, vfp.standard_fp_status);
        break;
    case FPST_STD_F16:
        offset = offsetof(CPUARMState, vfp.standard_fp_status_f16);
        break;
    default:
        g_assert_not_reached();
    }
    tcg_gen_addi_ptr(statusptr, tcg_env, offset);
    return statusptr;
}
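
/*
 * Example (illustrative): a caller grabs the appropriate status pointer
 * and passes it to a floating-point helper, e.g.
 *
 *     TCGv_ptr fpst = fpstatus_ptr(FPST_FPCR);
 *     gen_helper_vfp_adds(dest, op1, op2, fpst);
 *
 * gen_helper_vfp_adds() here is just a representative VFP helper call.
 */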

/**
 * finalize_memop_atom:
 * @s: DisasContext
 * @opc: size+sign+align of the memory operation
 * @atom: atomicity of the memory operation
 *
 * Build the complete MemOp for a memory operation, including alignment,
 * endianness, and atomicity.
 *
 * If (op & MO_AMASK) then the operation already contains the required
 * alignment, e.g. for AccType_ATOMIC.  Otherwise, this is an optionally
 * unaligned operation, e.g. for AccType_NORMAL.
 *
 * In the latter case, there are configuration bits that require alignment,
 * and this is applied here.  Note that there is no way to indicate that
 * no alignment should ever be enforced; this must be handled manually.
 */
static inline MemOp finalize_memop_atom(DisasContext *s, MemOp opc, MemOp atom)
{
    if (s->align_mem && !(opc & MO_AMASK)) {
        opc |= MO_ALIGN;
    }
    return opc | atom | s->be_data;
}

/**
 * finalize_memop:
 * @s: DisasContext
 * @opc: size+sign+align of the memory operation
 *
 * Like finalize_memop_atom, but with default atomicity.
 */
static inline MemOp finalize_memop(DisasContext *s, MemOp opc)
{
    MemOp atom = s->lse2 ? MO_ATOM_WITHIN16 : MO_ATOM_IFALIGN;
    return finalize_memop_atom(s, opc, atom);
}
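
/*
 * Example (illustrative): a load emitter builds its MemOp through
 * finalize_memop() so that SCTLR alignment requirements, endianness and
 * LSE2 atomicity are all folded in, e.g.
 *
 *     MemOp mop = finalize_memop(s, MO_32);
 *     tcg_gen_qemu_ld_i32(dest, addr, get_mem_index(s), mop);
 */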

/**
 * finalize_memop_pair:
 * @s: DisasContext
 * @opc: size+sign+align of the memory operation
 *
 * Like finalize_memop_atom, but with atomicity for a pair.
 * C.f. Pseudocode for Mem[], operand ispair.
 */
static inline MemOp finalize_memop_pair(DisasContext *s, MemOp opc)
{
    MemOp atom = s->lse2 ? MO_ATOM_WITHIN16_PAIR : MO_ATOM_IFALIGN_PAIR;
    return finalize_memop_atom(s, opc, atom);
}

/**
 * finalize_memop_asimd:
 * @s: DisasContext
 * @opc: size+sign+align of the memory operation
 *
 * Like finalize_memop_atom, but with atomicity of AccessType_ASIMD.
 */
static inline MemOp finalize_memop_asimd(DisasContext *s, MemOp opc)
{
    /*
     * In the pseudocode for Mem[], with AccessType_ASIMD, size == 16,
     * if IsAligned(8), the first case provides separate atomicity for
     * the pair of 64-bit accesses.  If !IsAligned(8), the middle cases
     * do not apply, and we're left with the final case of no atomicity.
     * Thus MO_ATOM_IFALIGN_PAIR.
     *
     * For other sizes, normal LSE2 rules apply.
     */
    if ((opc & MO_SIZE) == MO_128) {
        return finalize_memop_atom(s, opc, MO_ATOM_IFALIGN_PAIR);
    }
    return finalize_memop(s, opc);
}

/**
 * asimd_imm_const: Expand an encoded SIMD constant value
 *
 * Expand a SIMD constant value. This is essentially the pseudocode
 * AdvSIMDExpandImm, except that we also perform the boolean NOT needed for
 * VMVN and VBIC (when cmode < 14 && op == 1).
 *
 * The combination cmode == 15 op == 1 is a reserved encoding for AArch32;
 * callers must catch this; we return the 64-bit constant value defined
 * for AArch64.
 *
 * cmode = 2,3,4,5,6,7,10,11,12,13 imm=0 was UNPREDICTABLE in v7A but
 * is either not unpredictable or merely CONSTRAINED UNPREDICTABLE in v8A;
 * we produce an immediate constant value of 0 in these cases.
 */
uint64_t asimd_imm_const(uint32_t imm, int cmode, int op);

/*
 * gen_disas_label:
 * Create a label and cache a copy of pc_save.
 */
static inline DisasLabel gen_disas_label(DisasContext *s)
{
    return (DisasLabel){
        .label = gen_new_label(),
        .pc_save = s->pc_save,
    };
}

/*
 * set_disas_label:
 * Emit a label and restore the cached copy of pc_save.
 */
static inline void set_disas_label(DisasContext *s, DisasLabel l)
{
    gen_set_label(l.label);
    s->pc_save = l.pc_save;
}
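
/*
 * Example (illustrative): the pair above brackets conditionally skipped
 * code while keeping pc_save consistent on both paths, e.g.
 *
 *     DisasLabel over = gen_disas_label(s);
 *     tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, over.label);
 *     ...emit the conditional body...
 *     set_disas_label(s, over);
 */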

static inline TCGv_ptr gen_lookup_cp_reg(uint32_t key)
{
    TCGv_ptr ret = tcg_temp_new_ptr();
    gen_helper_lookup_cp_reg(ret, tcg_env, tcg_constant_i32(key));
    return ret;
}

/*
 * Set and reset rounding mode around another operation.
 */
static inline TCGv_i32 gen_set_rmode(ARMFPRounding rmode, TCGv_ptr fpst)
{
    TCGv_i32 new = tcg_constant_i32(arm_rmode_to_sf(rmode));
    TCGv_i32 old = tcg_temp_new_i32();

    gen_helper_set_rmode(old, new, fpst);
    return old;
}

static inline void gen_restore_rmode(TCGv_i32 old, TCGv_ptr fpst)
{
    gen_helper_set_rmode(old, old, fpst);
}
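
/*
 * Example (illustrative): the usual pattern brackets a single operation
 * with a temporary rounding mode and then restores the previous one, e.g.
 *
 *     TCGv_i32 tcg_rmode = gen_set_rmode(FPROUNDING_ZERO, fpst);
 *     gen_helper_rints(dest, src, fpst);
 *     gen_restore_rmode(tcg_rmode, fpst);
 *
 * FPROUNDING_ZERO and gen_helper_rints() are representative choices.
 */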

/*
 * Helpers for implementing sets of trans_* functions.
 * Defer the implementation of NAME to FUNC, with optional extra arguments.
 */
#define TRANS(NAME, FUNC, ...) \
    static bool trans_##NAME(DisasContext *s, arg_##NAME *a) \
    { return FUNC(s, __VA_ARGS__); }
#define TRANS_FEAT(NAME, FEAT, FUNC, ...) \
    static bool trans_##NAME(DisasContext *s, arg_##NAME *a) \
    { return dc_isar_feature(FEAT, s) && FUNC(s, __VA_ARGS__); }

#define TRANS_FEAT_NONSTREAMING(NAME, FEAT, FUNC, ...) \
    static bool trans_##NAME(DisasContext *s, arg_##NAME *a) \
    { \
        s->is_nonstreaming = true; \
        return dc_isar_feature(FEAT, s) && FUNC(s, __VA_ARGS__); \
    }
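
/*
 * Example (illustrative): with hypothetical names FOO and do_foo,
 *
 *     TRANS_FEAT(FOO, aa64_sve, do_foo, a)
 *
 * expands to a trans_FOO() that first checks dc_isar_feature(aa64_sve, s)
 * and, only if that passes, tail-calls do_foo(s, a).
 */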

#endif /* TARGET_ARM_TRANSLATE_H */