commit 6e591a8569

We copied the data from the general register input to the vector
register output, but have not yet replicated it.  We intended to fall
through into the vector-vector case, but failed to redirect the input
register.  This is caught by an assertion failure in tcg_out_insn_VRIc,
which diagnosed the incorrect register class.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
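For context, a minimal sketch of the pattern the message describes (illustrative only: the affected function is not part of this excerpt, and its shape here is assumed). After VLVGP copies the general register into both 64-bit lanes of the vector destination, the input must be redirected to that destination so that the fall-through VREP replicates from a vector register:

    /* Hypothetical shape of the dup expansion; names assumed. */
    if (is_general_reg(src)) {
        /* Replicate the general register into two MO_64 lanes. */
        tcg_out_insn(s, VRRf, VLVGP, dst, src, src);
        if (vece == MO_64) {
            return true;
        }
        src = dst;  /* the missing redirection: VREP requires a vector input */
    }
    /* Fall through to the vector-vector case. */
    tcg_out_insn(s, VRIc, VREP, dst, (8 >> vece) - 1, src, vece);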
/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2009 Ulrich Hecht <uli@suse.de>
 * Copyright (c) 2009 Alexander Graf <agraf@suse.de>
 * Copyright (c) 2010 Richard Henderson <rth@twiddle.net>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/* We only support generating code for 64-bit mode.  */
#if TCG_TARGET_REG_BITS != 64
#error "unsupported code generation mode"
#endif

#include "../tcg-ldst.c.inc"
#include "../tcg-pool.c.inc"
#include "elf.h"

/* ??? The translation blocks produced by TCG are generally small enough to
   be entirely reachable with a 16-bit displacement.  Leaving the option for
   a 32-bit displacement here Just In Case.  */
#define USE_LONG_BRANCHES 0

#define TCG_CT_CONST_S16   0x100
#define TCG_CT_CONST_S32   0x200
#define TCG_CT_CONST_S33   0x400
#define TCG_CT_CONST_ZERO  0x800

#define ALL_GENERAL_REGS     MAKE_64BIT_MASK(0, 16)
#define ALL_VECTOR_REGS      MAKE_64BIT_MASK(32, 32)

/*
 * For softmmu, we need to avoid conflicts with the first 3
 * argument registers to perform the tlb lookup, and to call
 * the helper function.
 */
#ifdef CONFIG_SOFTMMU
#define SOFTMMU_RESERVE_REGS MAKE_64BIT_MASK(TCG_REG_R2, 3)
#else
#define SOFTMMU_RESERVE_REGS 0
#endif


/* Several places within the instruction set 0 means "no register"
   rather than TCG_REG_R0.  */
#define TCG_REG_NONE    0

/* A scratch register that may be used throughout the backend.  */
#define TCG_TMP0        TCG_REG_R1

/* A scratch register that holds a pointer to the beginning of the TB.
   We don't need this when we have pc-relative loads with the general
   instructions extension facility.  */
#define TCG_REG_TB      TCG_REG_R12
#define USE_REG_TB      (!HAVE_FACILITY(GEN_INST_EXT))

#ifndef CONFIG_SOFTMMU
#define TCG_GUEST_BASE_REG TCG_REG_R13
#endif

/* All of the following instructions are prefixed with their instruction
   format, and are defined as 8- or 16-bit quantities, even when the two
   halves of the 16-bit quantity may appear 32 bits apart in the insn.
   This makes it easy to copy the values from the tables in Appendix B.  */
typedef enum S390Opcode {
    RIL_AFI     = 0xc209,
    RIL_AGFI    = 0xc208,
    RIL_ALFI    = 0xc20b,
    RIL_ALGFI   = 0xc20a,
    RIL_BRASL   = 0xc005,
    RIL_BRCL    = 0xc004,
    RIL_CFI     = 0xc20d,
    RIL_CGFI    = 0xc20c,
    RIL_CLFI    = 0xc20f,
    RIL_CLGFI   = 0xc20e,
    RIL_CLRL    = 0xc60f,
    RIL_CLGRL   = 0xc60a,
    RIL_CRL     = 0xc60d,
    RIL_CGRL    = 0xc608,
    RIL_IIHF    = 0xc008,
    RIL_IILF    = 0xc009,
    RIL_LARL    = 0xc000,
    RIL_LGFI    = 0xc001,
    RIL_LGRL    = 0xc408,
    RIL_LLIHF   = 0xc00e,
    RIL_LLILF   = 0xc00f,
    RIL_LRL     = 0xc40d,
    RIL_MSFI    = 0xc201,
    RIL_MSGFI   = 0xc200,
    RIL_NIHF    = 0xc00a,
    RIL_NILF    = 0xc00b,
    RIL_OIHF    = 0xc00c,
    RIL_OILF    = 0xc00d,
    RIL_SLFI    = 0xc205,
    RIL_SLGFI   = 0xc204,
    RIL_XIHF    = 0xc006,
    RIL_XILF    = 0xc007,

    RI_AGHI     = 0xa70b,
    RI_AHI      = 0xa70a,
    RI_BRC      = 0xa704,
    RI_CHI      = 0xa70e,
    RI_CGHI     = 0xa70f,
    RI_IIHH     = 0xa500,
    RI_IIHL     = 0xa501,
    RI_IILH     = 0xa502,
    RI_IILL     = 0xa503,
    RI_LGHI     = 0xa709,
    RI_LLIHH    = 0xa50c,
    RI_LLIHL    = 0xa50d,
    RI_LLILH    = 0xa50e,
    RI_LLILL    = 0xa50f,
    RI_MGHI     = 0xa70d,
    RI_MHI      = 0xa70c,
    RI_NIHH     = 0xa504,
    RI_NIHL     = 0xa505,
    RI_NILH     = 0xa506,
    RI_NILL     = 0xa507,
    RI_OIHH     = 0xa508,
    RI_OIHL     = 0xa509,
    RI_OILH     = 0xa50a,
    RI_OILL     = 0xa50b,
    RI_TMLL     = 0xa701,

    RIE_CGIJ    = 0xec7c,
    RIE_CGRJ    = 0xec64,
    RIE_CIJ     = 0xec7e,
    RIE_CLGRJ   = 0xec65,
    RIE_CLIJ    = 0xec7f,
    RIE_CLGIJ   = 0xec7d,
    RIE_CLRJ    = 0xec77,
    RIE_CRJ     = 0xec76,
    RIE_LOCGHI  = 0xec46,
    RIE_RISBG   = 0xec55,

    RRE_AGR     = 0xb908,
    RRE_ALGR    = 0xb90a,
    RRE_ALCR    = 0xb998,
    RRE_ALCGR   = 0xb988,
    RRE_CGR     = 0xb920,
    RRE_CLGR    = 0xb921,
    RRE_DLGR    = 0xb987,
    RRE_DLR     = 0xb997,
    RRE_DSGFR   = 0xb91d,
    RRE_DSGR    = 0xb90d,
    RRE_FLOGR   = 0xb983,
    RRE_LGBR    = 0xb906,
    RRE_LCGR    = 0xb903,
    RRE_LGFR    = 0xb914,
    RRE_LGHR    = 0xb907,
    RRE_LGR     = 0xb904,
    RRE_LLGCR   = 0xb984,
    RRE_LLGFR   = 0xb916,
    RRE_LLGHR   = 0xb985,
    RRE_LRVR    = 0xb91f,
    RRE_LRVGR   = 0xb90f,
    RRE_LTGR    = 0xb902,
    RRE_MLGR    = 0xb986,
    RRE_MSGR    = 0xb90c,
    RRE_MSR     = 0xb252,
    RRE_NGR     = 0xb980,
    RRE_OGR     = 0xb981,
    RRE_SGR     = 0xb909,
    RRE_SLGR    = 0xb90b,
    RRE_SLBR    = 0xb999,
    RRE_SLBGR   = 0xb989,
    RRE_XGR     = 0xb982,

    RRF_LOCR    = 0xb9f2,
    RRF_LOCGR   = 0xb9e2,
    RRF_NRK     = 0xb9f4,
    RRF_NGRK    = 0xb9e4,
    RRF_ORK     = 0xb9f6,
    RRF_OGRK    = 0xb9e6,
    RRF_SRK     = 0xb9f9,
    RRF_SGRK    = 0xb9e9,
    RRF_SLRK    = 0xb9fb,
    RRF_SLGRK   = 0xb9eb,
    RRF_XRK     = 0xb9f7,
    RRF_XGRK    = 0xb9e7,

    RR_AR       = 0x1a,
    RR_ALR      = 0x1e,
    RR_BASR     = 0x0d,
    RR_BCR      = 0x07,
    RR_CLR      = 0x15,
    RR_CR       = 0x19,
    RR_DR       = 0x1d,
    RR_LCR      = 0x13,
    RR_LR       = 0x18,
    RR_LTR      = 0x12,
    RR_NR       = 0x14,
    RR_OR       = 0x16,
    RR_SR       = 0x1b,
    RR_SLR      = 0x1f,
    RR_XR       = 0x17,

    RSY_RLL     = 0xeb1d,
    RSY_RLLG    = 0xeb1c,
    RSY_SLLG    = 0xeb0d,
    RSY_SLLK    = 0xebdf,
    RSY_SRAG    = 0xeb0a,
    RSY_SRAK    = 0xebdc,
    RSY_SRLG    = 0xeb0c,
    RSY_SRLK    = 0xebde,

    RS_SLL      = 0x89,
    RS_SRA      = 0x8a,
    RS_SRL      = 0x88,

    RXY_AG      = 0xe308,
    RXY_AY      = 0xe35a,
    RXY_CG      = 0xe320,
    RXY_CLG     = 0xe321,
    RXY_CLY     = 0xe355,
    RXY_CY      = 0xe359,
    RXY_LAY     = 0xe371,
    RXY_LB      = 0xe376,
    RXY_LG      = 0xe304,
    RXY_LGB     = 0xe377,
    RXY_LGF     = 0xe314,
    RXY_LGH     = 0xe315,
    RXY_LHY     = 0xe378,
    RXY_LLGC    = 0xe390,
    RXY_LLGF    = 0xe316,
    RXY_LLGH    = 0xe391,
    RXY_LMG     = 0xeb04,
    RXY_LRV     = 0xe31e,
    RXY_LRVG    = 0xe30f,
    RXY_LRVH    = 0xe31f,
    RXY_LY      = 0xe358,
    RXY_NG      = 0xe380,
    RXY_OG      = 0xe381,
    RXY_STCY    = 0xe372,
    RXY_STG     = 0xe324,
    RXY_STHY    = 0xe370,
    RXY_STMG    = 0xeb24,
    RXY_STRV    = 0xe33e,
    RXY_STRVG   = 0xe32f,
    RXY_STRVH   = 0xe33f,
    RXY_STY     = 0xe350,
    RXY_XG      = 0xe382,

    RX_A        = 0x5a,
    RX_C        = 0x59,
    RX_L        = 0x58,
    RX_LA       = 0x41,
    RX_LH       = 0x48,
    RX_ST       = 0x50,
    RX_STC      = 0x42,
    RX_STH      = 0x40,

    VRIa_VGBM   = 0xe744,
    VRIa_VREPI  = 0xe745,
    VRIb_VGM    = 0xe746,
    VRIc_VREP   = 0xe74d,

    VRRa_VLC    = 0xe7de,
    VRRa_VLP    = 0xe7df,
    VRRa_VLR    = 0xe756,
    VRRc_VA     = 0xe7f3,
    VRRc_VCEQ   = 0xe7f8,   /* we leave the m5 cs field 0 */
    VRRc_VCH    = 0xe7fb,   /* " */
    VRRc_VCHL   = 0xe7f9,   /* " */
    VRRc_VERLLV = 0xe773,
    VRRc_VESLV  = 0xe770,
    VRRc_VESRAV = 0xe77a,
    VRRc_VESRLV = 0xe778,
    VRRc_VML    = 0xe7a2,
    VRRc_VMN    = 0xe7fe,
    VRRc_VMNL   = 0xe7fc,
    VRRc_VMX    = 0xe7ff,
    VRRc_VMXL   = 0xe7fd,
    VRRc_VN     = 0xe768,
    VRRc_VNC    = 0xe769,
    VRRc_VNN    = 0xe76e,
    VRRc_VNO    = 0xe76b,
    VRRc_VNX    = 0xe76c,
    VRRc_VO     = 0xe76a,
    VRRc_VOC    = 0xe76f,
    VRRc_VPKS   = 0xe797,   /* we leave the m5 cs field 0 */
    VRRc_VS     = 0xe7f7,
    VRRa_VUPH   = 0xe7d7,
    VRRa_VUPL   = 0xe7d6,
    VRRc_VX     = 0xe76d,
    VRRe_VSEL   = 0xe78d,
    VRRf_VLVGP  = 0xe762,

    VRSa_VERLL  = 0xe733,
    VRSa_VESL   = 0xe730,
    VRSa_VESRA  = 0xe73a,
    VRSa_VESRL  = 0xe738,
    VRSb_VLVG   = 0xe722,
    VRSc_VLGV   = 0xe721,

    VRX_VL      = 0xe706,
    VRX_VLLEZ   = 0xe704,
    VRX_VLREP   = 0xe705,
    VRX_VST     = 0xe70e,
    VRX_VSTEF   = 0xe70b,
    VRX_VSTEG   = 0xe70a,

    NOP         = 0x0707,
} S390Opcode;

#ifdef CONFIG_DEBUG_TCG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7",
    "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    "%v0", "%v1", "%v2", "%v3", "%v4", "%v5", "%v6", "%v7",
    "%v8", "%v9", "%v10", "%v11", "%v12", "%v13", "%v14", "%v15",
    "%v16", "%v17", "%v18", "%v19", "%v20", "%v21", "%v22", "%v23",
    "%v24", "%v25", "%v26", "%v27", "%v28", "%v29", "%v30", "%v31",
};
#endif

/* Since R6 is a potential argument register, choose it last of the
   call-saved registers.  Likewise prefer the call-clobbered registers
   in reverse order to maximize the chance of avoiding the arguments.  */
static const int tcg_target_reg_alloc_order[] = {
    /* Call saved registers.  */
    TCG_REG_R13,
    TCG_REG_R12,
    TCG_REG_R11,
    TCG_REG_R10,
    TCG_REG_R9,
    TCG_REG_R8,
    TCG_REG_R7,
    TCG_REG_R6,
    /* Call clobbered registers.  */
    TCG_REG_R14,
    TCG_REG_R0,
    TCG_REG_R1,
    /* Argument registers, in reverse order of allocation.  */
    TCG_REG_R5,
    TCG_REG_R4,
    TCG_REG_R3,
    TCG_REG_R2,

    /* V8-V15 are call saved, and omitted.  */
    TCG_REG_V0,
    TCG_REG_V1,
    TCG_REG_V2,
    TCG_REG_V3,
    TCG_REG_V4,
    TCG_REG_V5,
    TCG_REG_V6,
    TCG_REG_V7,
    TCG_REG_V16,
    TCG_REG_V17,
    TCG_REG_V18,
    TCG_REG_V19,
    TCG_REG_V20,
    TCG_REG_V21,
    TCG_REG_V22,
    TCG_REG_V23,
    TCG_REG_V24,
    TCG_REG_V25,
    TCG_REG_V26,
    TCG_REG_V27,
    TCG_REG_V28,
    TCG_REG_V29,
    TCG_REG_V30,
    TCG_REG_V31,
};

static const int tcg_target_call_iarg_regs[] = {
    TCG_REG_R2,
    TCG_REG_R3,
    TCG_REG_R4,
    TCG_REG_R5,
    TCG_REG_R6,
};

static const int tcg_target_call_oarg_regs[] = {
    TCG_REG_R2,
};

#define S390_CC_EQ      8
#define S390_CC_LT      4
#define S390_CC_GT      2
#define S390_CC_OV      1
#define S390_CC_NE      (S390_CC_LT | S390_CC_GT)
#define S390_CC_LE      (S390_CC_LT | S390_CC_EQ)
#define S390_CC_GE      (S390_CC_GT | S390_CC_EQ)
#define S390_CC_NEVER   0
#define S390_CC_ALWAYS  15
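
/*
 * Note (illustrative): these are 4-bit branch masks, with bits 8/4/2/1
 * selecting condition code values 0/1/2/3 respectively.  E.g.
 * S390_CC_NE = S390_CC_LT | S390_CC_GT = 4 | 2 = 6, i.e. branch when
 * the CC is 1 or 2.
 */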

/* Condition codes that result from a COMPARE and COMPARE LOGICAL.  */
static const uint8_t tcg_cond_to_s390_cond[] = {
    [TCG_COND_EQ]  = S390_CC_EQ,
    [TCG_COND_NE]  = S390_CC_NE,
    [TCG_COND_LT]  = S390_CC_LT,
    [TCG_COND_LE]  = S390_CC_LE,
    [TCG_COND_GT]  = S390_CC_GT,
    [TCG_COND_GE]  = S390_CC_GE,
    [TCG_COND_LTU] = S390_CC_LT,
    [TCG_COND_LEU] = S390_CC_LE,
    [TCG_COND_GTU] = S390_CC_GT,
    [TCG_COND_GEU] = S390_CC_GE,
};

/* Condition codes that result from a LOAD AND TEST.  Here, we have no
   unsigned instruction variation, however since the test is vs zero we
   can re-map the outcomes appropriately.  */
static const uint8_t tcg_cond_to_ltr_cond[] = {
    [TCG_COND_EQ]  = S390_CC_EQ,
    [TCG_COND_NE]  = S390_CC_NE,
    [TCG_COND_LT]  = S390_CC_LT,
    [TCG_COND_LE]  = S390_CC_LE,
    [TCG_COND_GT]  = S390_CC_GT,
    [TCG_COND_GE]  = S390_CC_GE,
    [TCG_COND_LTU] = S390_CC_NEVER,
    [TCG_COND_LEU] = S390_CC_EQ,
    [TCG_COND_GTU] = S390_CC_NE,
    [TCG_COND_GEU] = S390_CC_ALWAYS,
};

#ifdef CONFIG_SOFTMMU
static void * const qemu_ld_helpers[(MO_SSIZE | MO_BSWAP) + 1] = {
    [MO_UB]   = helper_ret_ldub_mmu,
    [MO_SB]   = helper_ret_ldsb_mmu,
    [MO_LEUW] = helper_le_lduw_mmu,
    [MO_LESW] = helper_le_ldsw_mmu,
    [MO_LEUL] = helper_le_ldul_mmu,
    [MO_LESL] = helper_le_ldsl_mmu,
    [MO_LEUQ] = helper_le_ldq_mmu,
    [MO_BEUW] = helper_be_lduw_mmu,
    [MO_BESW] = helper_be_ldsw_mmu,
    [MO_BEUL] = helper_be_ldul_mmu,
    [MO_BESL] = helper_be_ldsl_mmu,
    [MO_BEUQ] = helper_be_ldq_mmu,
};

static void * const qemu_st_helpers[(MO_SIZE | MO_BSWAP) + 1] = {
    [MO_UB]   = helper_ret_stb_mmu,
    [MO_LEUW] = helper_le_stw_mmu,
    [MO_LEUL] = helper_le_stl_mmu,
    [MO_LEUQ] = helper_le_stq_mmu,
    [MO_BEUW] = helper_be_stw_mmu,
    [MO_BEUL] = helper_be_stl_mmu,
    [MO_BEUQ] = helper_be_stq_mmu,
};
#endif

static const tcg_insn_unit *tb_ret_addr;
uint64_t s390_facilities[3];

static inline bool is_general_reg(TCGReg r)
{
    return r <= TCG_REG_R15;
}

static inline bool is_vector_reg(TCGReg r)
{
    return r >= TCG_REG_V0 && r <= TCG_REG_V31;
}

static bool patch_reloc(tcg_insn_unit *src_rw, int type,
                        intptr_t value, intptr_t addend)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    intptr_t pcrel2;
    uint32_t old;

    value += addend;
    pcrel2 = (tcg_insn_unit *)value - src_rx;

    switch (type) {
    case R_390_PC16DBL:
        if (pcrel2 == (int16_t)pcrel2) {
            tcg_patch16(src_rw, pcrel2);
            return true;
        }
        break;
    case R_390_PC32DBL:
        if (pcrel2 == (int32_t)pcrel2) {
            tcg_patch32(src_rw, pcrel2);
            return true;
        }
        break;
    case R_390_20:
        if (value == sextract64(value, 0, 20)) {
            old = *(uint32_t *)src_rw & 0xf00000ff;
            old |= ((value & 0xfff) << 16) | ((value & 0xff000) >> 4);
            tcg_patch32(src_rw, old);
            return true;
        }
        break;
    default:
        g_assert_not_reached();
    }
    return false;
}
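
/*
 * Worked example for the R_390_20 case above (illustrative): the low
 * 12 bits of the value (DL) are OR'd in at bit 16 of the 32-bit word
 * and the high 8 bits (DH) at bit 8.  E.g. value 0x12345 contributes
 * (0x345 << 16) | (0x12 << 8) to the patched word.
 */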

/* Test if a constant matches the constraint.  */
static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
{
    if (ct & TCG_CT_CONST) {
        return 1;
    }

    if (type == TCG_TYPE_I32) {
        val = (int32_t)val;
    }

    /* The following are mutually exclusive.  */
    if (ct & TCG_CT_CONST_S16) {
        return val == (int16_t)val;
    } else if (ct & TCG_CT_CONST_S32) {
        return val == (int32_t)val;
    } else if (ct & TCG_CT_CONST_S33) {
        return val >= -0xffffffffll && val <= 0xffffffffll;
    } else if (ct & TCG_CT_CONST_ZERO) {
        return val == 0;
    }

    return 0;
}

/* Emit instructions according to the given instruction format.  */

static void tcg_out_insn_RR(TCGContext *s, S390Opcode op, TCGReg r1, TCGReg r2)
{
    tcg_out16(s, (op << 8) | (r1 << 4) | r2);
}

static void tcg_out_insn_RRE(TCGContext *s, S390Opcode op,
                             TCGReg r1, TCGReg r2)
{
    tcg_out32(s, (op << 16) | (r1 << 4) | r2);
}

static void tcg_out_insn_RRF(TCGContext *s, S390Opcode op,
                             TCGReg r1, TCGReg r2, int m3)
{
    tcg_out32(s, (op << 16) | (m3 << 12) | (r1 << 4) | r2);
}

static void tcg_out_insn_RI(TCGContext *s, S390Opcode op, TCGReg r1, int i2)
{
    tcg_out32(s, (op << 16) | (r1 << 20) | (i2 & 0xffff));
}

static void tcg_out_insn_RIE(TCGContext *s, S390Opcode op, TCGReg r1,
                             int i2, int m3)
{
    tcg_out16(s, (op & 0xff00) | (r1 << 4) | m3);
    tcg_out32(s, (i2 << 16) | (op & 0xff));
}

static void tcg_out_insn_RIL(TCGContext *s, S390Opcode op, TCGReg r1, int i2)
{
    tcg_out16(s, op | (r1 << 4));
    tcg_out32(s, i2);
}

static void tcg_out_insn_RS(TCGContext *s, S390Opcode op, TCGReg r1,
                            TCGReg b2, TCGReg r3, int disp)
{
    tcg_out32(s, (op << 24) | (r1 << 20) | (r3 << 16) | (b2 << 12)
              | (disp & 0xfff));
}

static void tcg_out_insn_RSY(TCGContext *s, S390Opcode op, TCGReg r1,
                             TCGReg b2, TCGReg r3, int disp)
{
    tcg_out16(s, (op & 0xff00) | (r1 << 4) | r3);
    tcg_out32(s, (op & 0xff) | (b2 << 28)
              | ((disp & 0xfff) << 16) | ((disp & 0xff000) >> 4));
}

#define tcg_out_insn_RX   tcg_out_insn_RS
#define tcg_out_insn_RXY  tcg_out_insn_RSY

static int RXB(TCGReg v1, TCGReg v2, TCGReg v3, TCGReg v4)
{
    /*
     * Shift bit 4 of each regno to its corresponding bit of RXB.
     * RXB itself begins at bit 8 of the instruction so 8 - 4 = 4
     * is the left-shift of the 4th operand.
     */
    return ((v1 & 0x10) << (4 + 3))
         | ((v2 & 0x10) << (4 + 2))
         | ((v3 & 0x10) << (4 + 1))
         | ((v4 & 0x10) << (4 + 0));
}
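
/*
 * Example (illustrative): for v1 = %v17 (regno 0x11) and all other
 * operands in the low bank, RXB(17, 0, 0, 0) = 0x10 << 7 = 0x0800,
 * which the VRx helpers below OR into the final halfword they emit.
 */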

static void tcg_out_insn_VRIa(TCGContext *s, S390Opcode op,
                              TCGReg v1, uint16_t i2, int m3)
{
    tcg_debug_assert(is_vector_reg(v1));
    tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4));
    tcg_out16(s, i2);
    tcg_out16(s, (op & 0x00ff) | RXB(v1, 0, 0, 0) | (m3 << 12));
}

static void tcg_out_insn_VRIb(TCGContext *s, S390Opcode op,
                              TCGReg v1, uint8_t i2, uint8_t i3, int m4)
{
    tcg_debug_assert(is_vector_reg(v1));
    tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4));
    tcg_out16(s, (i2 << 8) | (i3 & 0xff));
    tcg_out16(s, (op & 0x00ff) | RXB(v1, 0, 0, 0) | (m4 << 12));
}

static void tcg_out_insn_VRIc(TCGContext *s, S390Opcode op,
                              TCGReg v1, uint16_t i2, TCGReg v3, int m4)
{
    tcg_debug_assert(is_vector_reg(v1));
    tcg_debug_assert(is_vector_reg(v3));
    tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4) | (v3 & 0xf));
    tcg_out16(s, i2);
    tcg_out16(s, (op & 0x00ff) | RXB(v1, 0, v3, 0) | (m4 << 12));
}

static void tcg_out_insn_VRRa(TCGContext *s, S390Opcode op,
                              TCGReg v1, TCGReg v2, int m3)
{
    tcg_debug_assert(is_vector_reg(v1));
    tcg_debug_assert(is_vector_reg(v2));
    tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4) | (v2 & 0xf));
    tcg_out32(s, (op & 0x00ff) | RXB(v1, v2, 0, 0) | (m3 << 12));
}

static void tcg_out_insn_VRRc(TCGContext *s, S390Opcode op,
                              TCGReg v1, TCGReg v2, TCGReg v3, int m4)
{
    tcg_debug_assert(is_vector_reg(v1));
    tcg_debug_assert(is_vector_reg(v2));
    tcg_debug_assert(is_vector_reg(v3));
    tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4) | (v2 & 0xf));
    tcg_out16(s, v3 << 12);
    tcg_out16(s, (op & 0x00ff) | RXB(v1, v2, v3, 0) | (m4 << 12));
}

static void tcg_out_insn_VRRe(TCGContext *s, S390Opcode op,
                              TCGReg v1, TCGReg v2, TCGReg v3, TCGReg v4)
{
    tcg_debug_assert(is_vector_reg(v1));
    tcg_debug_assert(is_vector_reg(v2));
    tcg_debug_assert(is_vector_reg(v3));
    tcg_debug_assert(is_vector_reg(v4));
    tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4) | (v2 & 0xf));
    tcg_out16(s, v3 << 12);
    tcg_out16(s, (op & 0x00ff) | RXB(v1, v2, v3, v4) | (v4 << 12));
}

static void tcg_out_insn_VRRf(TCGContext *s, S390Opcode op,
                              TCGReg v1, TCGReg r2, TCGReg r3)
{
    tcg_debug_assert(is_vector_reg(v1));
    tcg_debug_assert(is_general_reg(r2));
    tcg_debug_assert(is_general_reg(r3));
    tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4) | r2);
    tcg_out16(s, r3 << 12);
    tcg_out16(s, (op & 0x00ff) | RXB(v1, 0, 0, 0));
}

static void tcg_out_insn_VRSa(TCGContext *s, S390Opcode op, TCGReg v1,
                              intptr_t d2, TCGReg b2, TCGReg v3, int m4)
{
    tcg_debug_assert(is_vector_reg(v1));
    tcg_debug_assert(d2 >= 0 && d2 <= 0xfff);
    tcg_debug_assert(is_general_reg(b2));
    tcg_debug_assert(is_vector_reg(v3));
    tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4) | (v3 & 0xf));
    tcg_out16(s, b2 << 12 | d2);
    tcg_out16(s, (op & 0x00ff) | RXB(v1, 0, v3, 0) | (m4 << 12));
}

static void tcg_out_insn_VRSb(TCGContext *s, S390Opcode op, TCGReg v1,
                              intptr_t d2, TCGReg b2, TCGReg r3, int m4)
{
    tcg_debug_assert(is_vector_reg(v1));
    tcg_debug_assert(d2 >= 0 && d2 <= 0xfff);
    tcg_debug_assert(is_general_reg(b2));
    tcg_debug_assert(is_general_reg(r3));
    tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4) | r3);
    tcg_out16(s, b2 << 12 | d2);
    tcg_out16(s, (op & 0x00ff) | RXB(v1, 0, 0, 0) | (m4 << 12));
}

static void tcg_out_insn_VRSc(TCGContext *s, S390Opcode op, TCGReg r1,
                              intptr_t d2, TCGReg b2, TCGReg v3, int m4)
{
    tcg_debug_assert(is_general_reg(r1));
    tcg_debug_assert(d2 >= 0 && d2 <= 0xfff);
    tcg_debug_assert(is_general_reg(b2));
    tcg_debug_assert(is_vector_reg(v3));
    tcg_out16(s, (op & 0xff00) | (r1 << 4) | (v3 & 0xf));
    tcg_out16(s, b2 << 12 | d2);
    tcg_out16(s, (op & 0x00ff) | RXB(0, 0, v3, 0) | (m4 << 12));
}

static void tcg_out_insn_VRX(TCGContext *s, S390Opcode op, TCGReg v1,
                             TCGReg b2, TCGReg x2, intptr_t d2, int m3)
{
    tcg_debug_assert(is_vector_reg(v1));
    tcg_debug_assert(d2 >= 0 && d2 <= 0xfff);
    tcg_debug_assert(is_general_reg(x2));
    tcg_debug_assert(is_general_reg(b2));
    tcg_out16(s, (op & 0xff00) | ((v1 & 0xf) << 4) | x2);
    tcg_out16(s, (b2 << 12) | d2);
    tcg_out16(s, (op & 0x00ff) | RXB(v1, 0, 0, 0) | (m3 << 12));
}

/* Emit an opcode with "type-checking" of the format.  */
#define tcg_out_insn(S, FMT, OP, ...) \
    glue(tcg_out_insn_,FMT)(S, glue(glue(FMT,_),OP), ## __VA_ARGS__)


/* emit 64-bit shifts */
static void tcg_out_sh64(TCGContext* s, S390Opcode op, TCGReg dest,
                         TCGReg src, TCGReg sh_reg, int sh_imm)
{
    tcg_out_insn_RSY(s, op, dest, sh_reg, src, sh_imm);
}

/* emit 32-bit shifts */
static void tcg_out_sh32(TCGContext* s, S390Opcode op, TCGReg dest,
                         TCGReg sh_reg, int sh_imm)
{
    tcg_out_insn_RS(s, op, dest, sh_reg, 0, sh_imm);
}

static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg dst, TCGReg src)
{
    if (src == dst) {
        return true;
    }
    switch (type) {
    case TCG_TYPE_I32:
        if (likely(is_general_reg(dst) && is_general_reg(src))) {
            tcg_out_insn(s, RR, LR, dst, src);
            break;
        }
        /* fallthru */

    case TCG_TYPE_I64:
        if (likely(is_general_reg(dst))) {
            if (likely(is_general_reg(src))) {
                tcg_out_insn(s, RRE, LGR, dst, src);
            } else {
                tcg_out_insn(s, VRSc, VLGV, dst, 0, 0, src, 3);
            }
            break;
        } else if (is_general_reg(src)) {
            tcg_out_insn(s, VRSb, VLVG, dst, 0, 0, src, 3);
            break;
        }
        /* fallthru */

    case TCG_TYPE_V64:
    case TCG_TYPE_V128:
        tcg_out_insn(s, VRRa, VLR, dst, src, 0);
        break;

    default:
        g_assert_not_reached();
    }
    return true;
}

static const S390Opcode lli_insns[4] = {
    RI_LLILL, RI_LLILH, RI_LLIHL, RI_LLIHH
};

static bool maybe_out_small_movi(TCGContext *s, TCGType type,
                                 TCGReg ret, tcg_target_long sval)
{
    tcg_target_ulong uval = sval;
    int i;

    if (type == TCG_TYPE_I32) {
        uval = (uint32_t)sval;
        sval = (int32_t)sval;
    }

    /* Try all 32-bit insns that can load it in one go.  */
    if (sval >= -0x8000 && sval < 0x8000) {
        tcg_out_insn(s, RI, LGHI, ret, sval);
        return true;
    }

    for (i = 0; i < 4; i++) {
        tcg_target_long mask = 0xffffull << i*16;
        if ((uval & mask) == uval) {
            tcg_out_insn_RI(s, lli_insns[i], ret, uval >> i*16);
            return true;
        }
    }

    return false;
}
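
/*
 * Example (illustrative): sval = 0x00450000 matches the i = 1 mask
 * 0xffff0000, so the loop above emits LLILH with immediate 0x0045;
 * values spanning more than one halfword return false to the caller.
 */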

/* load a register with an immediate value */
static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret,
                             tcg_target_long sval, bool in_prologue)
{
    tcg_target_ulong uval;

    /* Try all 32-bit insns that can load it in one go.  */
    if (maybe_out_small_movi(s, type, ret, sval)) {
        return;
    }

    uval = sval;
    if (type == TCG_TYPE_I32) {
        uval = (uint32_t)sval;
        sval = (int32_t)sval;
    }

    /* Try all 48-bit insns that can load it in one go.  */
    if (HAVE_FACILITY(EXT_IMM)) {
        if (sval == (int32_t)sval) {
            tcg_out_insn(s, RIL, LGFI, ret, sval);
            return;
        }
        if (uval <= 0xffffffff) {
            tcg_out_insn(s, RIL, LLILF, ret, uval);
            return;
        }
        if ((uval & 0xffffffff) == 0) {
            tcg_out_insn(s, RIL, LLIHF, ret, uval >> 32);
            return;
        }
    }

    /* Try for PC-relative address load.  For odd addresses,
       attempt to use an offset from the start of the TB.  */
    if ((sval & 1) == 0) {
        ptrdiff_t off = tcg_pcrel_diff(s, (void *)sval) >> 1;
        if (off == (int32_t)off) {
            tcg_out_insn(s, RIL, LARL, ret, off);
            return;
        }
    } else if (USE_REG_TB && !in_prologue) {
        ptrdiff_t off = tcg_tbrel_diff(s, (void *)sval);
        if (off == sextract64(off, 0, 20)) {
            /* This is certain to be an address within TB, and therefore
               OFF will be negative; don't try RX_LA.  */
            tcg_out_insn(s, RXY, LAY, ret, TCG_REG_TB, TCG_REG_NONE, off);
            return;
        }
    }

    /* A 32-bit unsigned value can be loaded in 2 insns.  And given
       that LLILL, LLIHL, LLILF above did not succeed, we know that
       both insns are required.  */
    if (uval <= 0xffffffff) {
        tcg_out_insn(s, RI, LLILL, ret, uval);
        tcg_out_insn(s, RI, IILH, ret, uval >> 16);
        return;
    }

    /* Otherwise, stuff it in the constant pool.  */
    if (HAVE_FACILITY(GEN_INST_EXT)) {
        tcg_out_insn(s, RIL, LGRL, ret, 0);
        new_pool_label(s, sval, R_390_PC32DBL, s->code_ptr - 2, 2);
    } else if (USE_REG_TB && !in_prologue) {
        tcg_out_insn(s, RXY, LG, ret, TCG_REG_TB, TCG_REG_NONE, 0);
        new_pool_label(s, sval, R_390_20, s->code_ptr - 2,
                       tcg_tbrel_diff(s, NULL));
    } else {
        TCGReg base = ret ? ret : TCG_TMP0;
        tcg_out_insn(s, RIL, LARL, base, 0);
        new_pool_label(s, sval, R_390_PC32DBL, s->code_ptr - 2, 2);
        tcg_out_insn(s, RXY, LG, ret, base, TCG_REG_NONE, 0);
    }
}

static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long sval)
{
    tcg_out_movi_int(s, type, ret, sval, false);
}

/* Emit a load/store type instruction.  Inputs are:
   DATA:     The register to be loaded or stored.
   BASE+OFS: The effective address.
   OPC_RX:   If the operation has an RX format opcode (e.g. STC), otherwise 0.
   OPC_RXY:  The RXY format opcode for the operation (e.g. STCY).  */

static void tcg_out_mem(TCGContext *s, S390Opcode opc_rx, S390Opcode opc_rxy,
                        TCGReg data, TCGReg base, TCGReg index,
                        tcg_target_long ofs)
{
    if (ofs < -0x80000 || ofs >= 0x80000) {
        /* Combine the low 20 bits of the offset with the actual load insn;
           the high 44 bits must come from an immediate load.  */
        tcg_target_long low = ((ofs & 0xfffff) ^ 0x80000) - 0x80000;
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, ofs - low);
        ofs = low;

        /* If we were already given an index register, add it in.  */
        if (index != TCG_REG_NONE) {
            tcg_out_insn(s, RRE, AGR, TCG_TMP0, index);
        }
        index = TCG_TMP0;
    }

    if (opc_rx && ofs >= 0 && ofs < 0x1000) {
        tcg_out_insn_RX(s, opc_rx, data, base, index, ofs);
    } else {
        tcg_out_insn_RXY(s, opc_rxy, data, base, index, ofs);
    }
}
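
/*
 * Example (illustrative): ofs = 0x12345678 does not fit in a signed
 * 20-bit displacement, so the split above loads ofs - low = 0x12300000
 * into TCG_TMP0 and keeps low = 0x45678 as the displacement of the
 * final RX/RXY insn.
 */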

static void tcg_out_vrx_mem(TCGContext *s, S390Opcode opc_vrx,
                            TCGReg data, TCGReg base, TCGReg index,
                            tcg_target_long ofs, int m3)
{
    if (ofs < 0 || ofs >= 0x1000) {
        if (ofs >= -0x80000 && ofs < 0x80000) {
            tcg_out_insn(s, RXY, LAY, TCG_TMP0, base, index, ofs);
            base = TCG_TMP0;
            index = TCG_REG_NONE;
            ofs = 0;
        } else {
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, ofs);
            if (index != TCG_REG_NONE) {
                tcg_out_insn(s, RRE, AGR, TCG_TMP0, index);
            }
            index = TCG_TMP0;
            ofs = 0;
        }
    }
    tcg_out_insn_VRX(s, opc_vrx, data, base, index, ofs, m3);
}

/* load data without address translation or endianness conversion */
static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg data,
                       TCGReg base, intptr_t ofs)
{
    switch (type) {
    case TCG_TYPE_I32:
        if (likely(is_general_reg(data))) {
            tcg_out_mem(s, RX_L, RXY_LY, data, base, TCG_REG_NONE, ofs);
            break;
        }
        tcg_out_vrx_mem(s, VRX_VLLEZ, data, base, TCG_REG_NONE, ofs, MO_32);
        break;

    case TCG_TYPE_I64:
        if (likely(is_general_reg(data))) {
            tcg_out_mem(s, 0, RXY_LG, data, base, TCG_REG_NONE, ofs);
            break;
        }
        /* fallthru */

    case TCG_TYPE_V64:
        tcg_out_vrx_mem(s, VRX_VLLEZ, data, base, TCG_REG_NONE, ofs, MO_64);
        break;

    case TCG_TYPE_V128:
        /* Hint quadword aligned.  */
        tcg_out_vrx_mem(s, VRX_VL, data, base, TCG_REG_NONE, ofs, 4);
        break;

    default:
        g_assert_not_reached();
    }
}

static void tcg_out_st(TCGContext *s, TCGType type, TCGReg data,
                       TCGReg base, intptr_t ofs)
{
    switch (type) {
    case TCG_TYPE_I32:
        if (likely(is_general_reg(data))) {
            tcg_out_mem(s, RX_ST, RXY_STY, data, base, TCG_REG_NONE, ofs);
        } else {
            tcg_out_vrx_mem(s, VRX_VSTEF, data, base, TCG_REG_NONE, ofs, 1);
        }
        break;

    case TCG_TYPE_I64:
        if (likely(is_general_reg(data))) {
            tcg_out_mem(s, 0, RXY_STG, data, base, TCG_REG_NONE, ofs);
            break;
        }
        /* fallthru */

    case TCG_TYPE_V64:
        tcg_out_vrx_mem(s, VRX_VSTEG, data, base, TCG_REG_NONE, ofs, 0);
        break;

    case TCG_TYPE_V128:
        /* Hint quadword aligned.  */
        tcg_out_vrx_mem(s, VRX_VST, data, base, TCG_REG_NONE, ofs, 4);
        break;

    default:
        g_assert_not_reached();
    }
}

static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                               TCGReg base, intptr_t ofs)
{
    return false;
}

/* load data from an absolute host address */
static void tcg_out_ld_abs(TCGContext *s, TCGType type,
                           TCGReg dest, const void *abs)
{
    intptr_t addr = (intptr_t)abs;

    if (HAVE_FACILITY(GEN_INST_EXT) && !(addr & 1)) {
        ptrdiff_t disp = tcg_pcrel_diff(s, abs) >> 1;
        if (disp == (int32_t)disp) {
            if (type == TCG_TYPE_I32) {
                tcg_out_insn(s, RIL, LRL, dest, disp);
            } else {
                tcg_out_insn(s, RIL, LGRL, dest, disp);
            }
            return;
        }
    }
    if (USE_REG_TB) {
        ptrdiff_t disp = tcg_tbrel_diff(s, abs);
        if (disp == sextract64(disp, 0, 20)) {
            tcg_out_ld(s, type, dest, TCG_REG_TB, disp);
            return;
        }
    }

    tcg_out_movi(s, TCG_TYPE_PTR, dest, addr & ~0xffff);
    tcg_out_ld(s, type, dest, dest, addr & 0xffff);
}

static inline void tcg_out_risbg(TCGContext *s, TCGReg dest, TCGReg src,
                                 int msb, int lsb, int ofs, int z)
{
    /* Format RIE-f */
    tcg_out16(s, (RIE_RISBG & 0xff00) | (dest << 4) | src);
    tcg_out16(s, (msb << 8) | (z << 7) | lsb);
    tcg_out16(s, (ofs << 8) | (RIE_RISBG & 0xff));
}

static void tgen_ext8s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
{
    if (HAVE_FACILITY(EXT_IMM)) {
        tcg_out_insn(s, RRE, LGBR, dest, src);
        return;
    }

    if (type == TCG_TYPE_I32) {
        if (dest == src) {
            tcg_out_sh32(s, RS_SLL, dest, TCG_REG_NONE, 24);
        } else {
            tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 24);
        }
        tcg_out_sh32(s, RS_SRA, dest, TCG_REG_NONE, 24);
    } else {
        tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 56);
        tcg_out_sh64(s, RSY_SRAG, dest, dest, TCG_REG_NONE, 56);
    }
}

static void tgen_ext8u(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
{
    if (HAVE_FACILITY(EXT_IMM)) {
        tcg_out_insn(s, RRE, LLGCR, dest, src);
        return;
    }

    if (dest == src) {
        tcg_out_movi(s, type, TCG_TMP0, 0xff);
        src = TCG_TMP0;
    } else {
        tcg_out_movi(s, type, dest, 0xff);
    }
    if (type == TCG_TYPE_I32) {
        tcg_out_insn(s, RR, NR, dest, src);
    } else {
        tcg_out_insn(s, RRE, NGR, dest, src);
    }
}

static void tgen_ext16s(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
{
    if (HAVE_FACILITY(EXT_IMM)) {
        tcg_out_insn(s, RRE, LGHR, dest, src);
        return;
    }

    if (type == TCG_TYPE_I32) {
        if (dest == src) {
            tcg_out_sh32(s, RS_SLL, dest, TCG_REG_NONE, 16);
        } else {
            tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 16);
        }
        tcg_out_sh32(s, RS_SRA, dest, TCG_REG_NONE, 16);
    } else {
        tcg_out_sh64(s, RSY_SLLG, dest, src, TCG_REG_NONE, 48);
        tcg_out_sh64(s, RSY_SRAG, dest, dest, TCG_REG_NONE, 48);
    }
}

static void tgen_ext16u(TCGContext *s, TCGType type, TCGReg dest, TCGReg src)
{
    if (HAVE_FACILITY(EXT_IMM)) {
        tcg_out_insn(s, RRE, LLGHR, dest, src);
        return;
    }

    if (dest == src) {
        tcg_out_movi(s, type, TCG_TMP0, 0xffff);
        src = TCG_TMP0;
    } else {
        tcg_out_movi(s, type, dest, 0xffff);
    }
    if (type == TCG_TYPE_I32) {
        tcg_out_insn(s, RR, NR, dest, src);
    } else {
        tcg_out_insn(s, RRE, NGR, dest, src);
    }
}

static inline void tgen_ext32s(TCGContext *s, TCGReg dest, TCGReg src)
{
    tcg_out_insn(s, RRE, LGFR, dest, src);
}

static inline void tgen_ext32u(TCGContext *s, TCGReg dest, TCGReg src)
{
    tcg_out_insn(s, RRE, LLGFR, dest, src);
}

/* Accept bit patterns like these:
       0....01....1
       1....10....0
       1..10..01..1
       0..01..10..0
   Copied from gcc sources.  */
static inline bool risbg_mask(uint64_t c)
{
    uint64_t lsb;
    /* We don't change the number of transitions by inverting,
       so make sure we start with the LSB zero.  */
    if (c & 1) {
        c = ~c;
    }
    /* Reject all zeros or all ones.  */
    if (c == 0) {
        return false;
    }
    /* Find the first transition.  */
    lsb = c & -c;
    /* Invert to look for a second transition.  */
    c = ~c;
    /* Erase the first transition.  */
    c &= -lsb;
    /* Find the second transition, if any.  */
    lsb = c & -c;
    /* Match if all the bits are 1's, or if c is zero.  */
    return c == -lsb;
}
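
/*
 * Example (illustrative): c = 0x00ffff00 has exactly two transitions
 * (the 0..01..10..0 shape above) and is accepted; c = 0x00ff00ff has
 * four transitions and is rejected, so callers fall back to the other
 * and-immediate strategies.
 */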

static void tgen_andi_risbg(TCGContext *s, TCGReg out, TCGReg in, uint64_t val)
{
    int msb, lsb;
    if ((val & 0x8000000000000001ull) == 0x8000000000000001ull) {
        /* Achieve wraparound by swapping msb and lsb.  */
        msb = 64 - ctz64(~val);
        lsb = clz64(~val) - 1;
    } else {
        msb = clz64(val);
        lsb = 63 - ctz64(val);
    }
    tcg_out_risbg(s, out, in, msb, lsb, 0, 1);
}

static void tgen_andi(TCGContext *s, TCGType type, TCGReg dest, uint64_t val)
{
    static const S390Opcode ni_insns[4] = {
        RI_NILL, RI_NILH, RI_NIHL, RI_NIHH
    };
    static const S390Opcode nif_insns[2] = {
        RIL_NILF, RIL_NIHF
    };
    uint64_t valid = (type == TCG_TYPE_I32 ? 0xffffffffull : -1ull);
    int i;

    /* Look for the zero-extensions.  */
    if ((val & valid) == 0xffffffff) {
        tgen_ext32u(s, dest, dest);
        return;
    }
    if (HAVE_FACILITY(EXT_IMM)) {
        if ((val & valid) == 0xff) {
            tgen_ext8u(s, TCG_TYPE_I64, dest, dest);
            return;
        }
        if ((val & valid) == 0xffff) {
            tgen_ext16u(s, TCG_TYPE_I64, dest, dest);
            return;
        }
    }

    /* Try all 32-bit insns that can perform it in one go.  */
    for (i = 0; i < 4; i++) {
        tcg_target_ulong mask = ~(0xffffull << i*16);
        if (((val | ~valid) & mask) == mask) {
            tcg_out_insn_RI(s, ni_insns[i], dest, val >> i*16);
            return;
        }
    }

    /* Try all 48-bit insns that can perform it in one go.  */
    if (HAVE_FACILITY(EXT_IMM)) {
        for (i = 0; i < 2; i++) {
            tcg_target_ulong mask = ~(0xffffffffull << i*32);
            if (((val | ~valid) & mask) == mask) {
                tcg_out_insn_RIL(s, nif_insns[i], dest, val >> i*32);
                return;
            }
        }
    }
    if (HAVE_FACILITY(GEN_INST_EXT) && risbg_mask(val)) {
        tgen_andi_risbg(s, dest, dest, val);
        return;
    }

    /* Use the constant pool if USE_REG_TB, but not for small constants.  */
    if (USE_REG_TB) {
        if (!maybe_out_small_movi(s, type, TCG_TMP0, val)) {
            tcg_out_insn(s, RXY, NG, dest, TCG_REG_TB, TCG_REG_NONE, 0);
            new_pool_label(s, val & valid, R_390_20, s->code_ptr - 2,
                           tcg_tbrel_diff(s, NULL));
            return;
        }
    } else {
        tcg_out_movi(s, type, TCG_TMP0, val);
    }
    if (type == TCG_TYPE_I32) {
        tcg_out_insn(s, RR, NR, dest, TCG_TMP0);
    } else {
        tcg_out_insn(s, RRE, NGR, dest, TCG_TMP0);
    }
}

static void tgen_ori(TCGContext *s, TCGType type, TCGReg dest, uint64_t val)
{
    static const S390Opcode oi_insns[4] = {
        RI_OILL, RI_OILH, RI_OIHL, RI_OIHH
    };
    static const S390Opcode oif_insns[2] = {
        RIL_OILF, RIL_OIHF
    };

    int i;

    /* Look for no-op.  */
    if (unlikely(val == 0)) {
        return;
    }

    /* Try all 32-bit insns that can perform it in one go.  */
    for (i = 0; i < 4; i++) {
        tcg_target_ulong mask = (0xffffull << i*16);
        if ((val & mask) != 0 && (val & ~mask) == 0) {
            tcg_out_insn_RI(s, oi_insns[i], dest, val >> i*16);
            return;
        }
    }

    /* Try all 48-bit insns that can perform it in one go.  */
    if (HAVE_FACILITY(EXT_IMM)) {
        for (i = 0; i < 2; i++) {
            tcg_target_ulong mask = (0xffffffffull << i*32);
            if ((val & mask) != 0 && (val & ~mask) == 0) {
                tcg_out_insn_RIL(s, oif_insns[i], dest, val >> i*32);
                return;
            }
        }
    }

    /* Use the constant pool if USE_REG_TB, but not for small constants.  */
    if (maybe_out_small_movi(s, type, TCG_TMP0, val)) {
        if (type == TCG_TYPE_I32) {
            tcg_out_insn(s, RR, OR, dest, TCG_TMP0);
        } else {
            tcg_out_insn(s, RRE, OGR, dest, TCG_TMP0);
        }
    } else if (USE_REG_TB) {
        tcg_out_insn(s, RXY, OG, dest, TCG_REG_TB, TCG_REG_NONE, 0);
        new_pool_label(s, val, R_390_20, s->code_ptr - 2,
                       tcg_tbrel_diff(s, NULL));
    } else {
        /* Perform the OR via sequential modifications to the high and
           low parts.  Do this via recursion to handle 16-bit vs 32-bit
           masks in each half.  */
        tcg_debug_assert(HAVE_FACILITY(EXT_IMM));
        tgen_ori(s, type, dest, val & 0x00000000ffffffffull);
        tgen_ori(s, type, dest, val & 0xffffffff00000000ull);
    }
}

static void tgen_xori(TCGContext *s, TCGType type, TCGReg dest, uint64_t val)
{
    /* Try all 48-bit insns that can perform it in one go.  */
    if (HAVE_FACILITY(EXT_IMM)) {
        if ((val & 0xffffffff00000000ull) == 0) {
            tcg_out_insn(s, RIL, XILF, dest, val);
            return;
        }
        if ((val & 0x00000000ffffffffull) == 0) {
            tcg_out_insn(s, RIL, XIHF, dest, val >> 32);
            return;
        }
    }

    /* Use the constant pool if USE_REG_TB, but not for small constants.  */
    if (maybe_out_small_movi(s, type, TCG_TMP0, val)) {
        if (type == TCG_TYPE_I32) {
            tcg_out_insn(s, RR, XR, dest, TCG_TMP0);
        } else {
            tcg_out_insn(s, RRE, XGR, dest, TCG_TMP0);
        }
    } else if (USE_REG_TB) {
        tcg_out_insn(s, RXY, XG, dest, TCG_REG_TB, TCG_REG_NONE, 0);
        new_pool_label(s, val, R_390_20, s->code_ptr - 2,
                       tcg_tbrel_diff(s, NULL));
    } else {
        /* Perform the xor by parts.  */
        tcg_debug_assert(HAVE_FACILITY(EXT_IMM));
        if (val & 0xffffffff) {
            tcg_out_insn(s, RIL, XILF, dest, val);
        }
        if (val > 0xffffffff) {
            tcg_out_insn(s, RIL, XIHF, dest, val >> 32);
        }
    }
}

static int tgen_cmp(TCGContext *s, TCGType type, TCGCond c, TCGReg r1,
                    TCGArg c2, bool c2const, bool need_carry)
{
    bool is_unsigned = is_unsigned_cond(c);
    S390Opcode op;

    if (c2const) {
        if (c2 == 0) {
            if (!(is_unsigned && need_carry)) {
                if (type == TCG_TYPE_I32) {
                    tcg_out_insn(s, RR, LTR, r1, r1);
                } else {
                    tcg_out_insn(s, RRE, LTGR, r1, r1);
                }
                return tcg_cond_to_ltr_cond[c];
            }
        }

        if (!is_unsigned && c2 == (int16_t)c2) {
            op = (type == TCG_TYPE_I32 ? RI_CHI : RI_CGHI);
            tcg_out_insn_RI(s, op, r1, c2);
            goto exit;
        }

        if (HAVE_FACILITY(EXT_IMM)) {
            if (type == TCG_TYPE_I32) {
                op = (is_unsigned ? RIL_CLFI : RIL_CFI);
                tcg_out_insn_RIL(s, op, r1, c2);
                goto exit;
            } else if (c2 == (is_unsigned ? (TCGArg)(uint32_t)c2
                                          : (TCGArg)(int32_t)c2)) {
                op = (is_unsigned ? RIL_CLGFI : RIL_CGFI);
                tcg_out_insn_RIL(s, op, r1, c2);
                goto exit;
            }
        }

        /* Use the constant pool, but not for small constants.  */
        if (maybe_out_small_movi(s, type, TCG_TMP0, c2)) {
            c2 = TCG_TMP0;
            /* fall through to reg-reg */
        } else if (USE_REG_TB) {
            if (type == TCG_TYPE_I32) {
                op = (is_unsigned ? RXY_CLY : RXY_CY);
                tcg_out_insn_RXY(s, op, r1, TCG_REG_TB, TCG_REG_NONE, 0);
                new_pool_label(s, (uint32_t)c2, R_390_20, s->code_ptr - 2,
                               4 - tcg_tbrel_diff(s, NULL));
            } else {
                op = (is_unsigned ? RXY_CLG : RXY_CG);
                tcg_out_insn_RXY(s, op, r1, TCG_REG_TB, TCG_REG_NONE, 0);
                new_pool_label(s, c2, R_390_20, s->code_ptr - 2,
                               tcg_tbrel_diff(s, NULL));
            }
            goto exit;
        } else {
            if (type == TCG_TYPE_I32) {
                op = (is_unsigned ? RIL_CLRL : RIL_CRL);
                tcg_out_insn_RIL(s, op, r1, 0);
                new_pool_label(s, (uint32_t)c2, R_390_PC32DBL,
                               s->code_ptr - 2, 2 + 4);
            } else {
                op = (is_unsigned ? RIL_CLGRL : RIL_CGRL);
                tcg_out_insn_RIL(s, op, r1, 0);
                new_pool_label(s, c2, R_390_PC32DBL, s->code_ptr - 2, 2);
            }
            goto exit;
        }
    }

    if (type == TCG_TYPE_I32) {
        op = (is_unsigned ? RR_CLR : RR_CR);
        tcg_out_insn_RR(s, op, r1, c2);
    } else {
        op = (is_unsigned ? RRE_CLGR : RRE_CGR);
        tcg_out_insn_RRE(s, op, r1, c2);
    }

 exit:
    return tcg_cond_to_s390_cond[c];
}

static void tgen_setcond(TCGContext *s, TCGType type, TCGCond cond,
                         TCGReg dest, TCGReg c1, TCGArg c2, int c2const)
{
    int cc;
    bool have_loc;

    /* With LOC2, we can always emit the minimum 3 insns.  */
    if (HAVE_FACILITY(LOAD_ON_COND2)) {
        /* Emit: d = 0, d = (cc ? 1 : d).  */
        cc = tgen_cmp(s, type, cond, c1, c2, c2const, false);
        tcg_out_movi(s, TCG_TYPE_I64, dest, 0);
        tcg_out_insn(s, RIE, LOCGHI, dest, 1, cc);
        return;
    }

    have_loc = HAVE_FACILITY(LOAD_ON_COND);

    /* For HAVE_LOC, only the paths through GTU/GT/LEU/LE are smaller.  */
 restart:
    switch (cond) {
    case TCG_COND_NE:
        /* X != 0 is X > 0.  */
        if (c2const && c2 == 0) {
            cond = TCG_COND_GTU;
        } else {
            break;
        }
        /* fallthru */

    case TCG_COND_GTU:
    case TCG_COND_GT:
        /* The result of a compare has CC=2 for GT and CC=3 unused.
           ADD LOGICAL WITH CARRY considers (CC & 2) the carry bit.  */
        tgen_cmp(s, type, cond, c1, c2, c2const, true);
        tcg_out_movi(s, type, dest, 0);
        tcg_out_insn(s, RRE, ALCGR, dest, dest);
        return;

    case TCG_COND_EQ:
        /* X == 0 is X <= 0.  */
        if (c2const && c2 == 0) {
            cond = TCG_COND_LEU;
        } else {
            break;
        }
        /* fallthru */

    case TCG_COND_LEU:
    case TCG_COND_LE:
        /* As above, but we're looking for borrow, or !carry.
           The second insn computes d - d - borrow, or -1 for true
           and 0 for false.  So we must mask to 1 bit afterward.  */
        tgen_cmp(s, type, cond, c1, c2, c2const, true);
        tcg_out_insn(s, RRE, SLBGR, dest, dest);
        tgen_andi(s, type, dest, 1);
        return;

    case TCG_COND_GEU:
    case TCG_COND_LTU:
    case TCG_COND_LT:
    case TCG_COND_GE:
        /* Swap operands so that we can use LEU/GTU/GT/LE.  */
        if (c2const) {
            if (have_loc) {
                break;
            }
            tcg_out_movi(s, type, TCG_TMP0, c2);
            c2 = c1;
            c2const = 0;
            c1 = TCG_TMP0;
        } else {
            TCGReg t = c1;
            c1 = c2;
            c2 = t;
        }
        cond = tcg_swap_cond(cond);
        goto restart;

    default:
        g_assert_not_reached();
    }

    cc = tgen_cmp(s, type, cond, c1, c2, c2const, false);
    if (have_loc) {
        /* Emit: d = 0, t = 1, d = (cc ? t : d).  */
        tcg_out_movi(s, TCG_TYPE_I64, dest, 0);
        tcg_out_movi(s, TCG_TYPE_I64, TCG_TMP0, 1);
        tcg_out_insn(s, RRF, LOCGR, dest, TCG_TMP0, cc);
    } else {
        /* Emit: d = 1; if (cc) goto over; d = 0; over:  */
        tcg_out_movi(s, type, dest, 1);
        tcg_out_insn(s, RI, BRC, cc, (4 + 4) >> 1);
        tcg_out_movi(s, type, dest, 0);
    }
}
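
/*
 * Worked example for the tricks above (illustrative): for GTU the
 * compare leaves CC = 2 exactly when the condition holds, and ALCGR
 * adds (CC & 2 ? 1 : 0) to a zeroed dest, yielding 0/1 directly.  For
 * LEU, SLBGR computes dest - dest - borrow, i.e. -1 for true and 0
 * for false, hence the final tgen_andi(..., 1) to squash -1 to 1.
 */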

static void tgen_movcond(TCGContext *s, TCGType type, TCGCond c, TCGReg dest,
                         TCGReg c1, TCGArg c2, int c2const,
                         TCGArg v3, int v3const)
{
    int cc;
    if (HAVE_FACILITY(LOAD_ON_COND)) {
        cc = tgen_cmp(s, type, c, c1, c2, c2const, false);
        if (v3const) {
            tcg_out_insn(s, RIE, LOCGHI, dest, v3, cc);
        } else {
            tcg_out_insn(s, RRF, LOCGR, dest, v3, cc);
        }
    } else {
        c = tcg_invert_cond(c);
        cc = tgen_cmp(s, type, c, c1, c2, c2const, false);

        /* Emit: if (cc) goto over; dest = r3; over:  */
        tcg_out_insn(s, RI, BRC, cc, (4 + 4) >> 1);
        tcg_out_insn(s, RRE, LGR, dest, v3);
    }
}

static void tgen_clz(TCGContext *s, TCGReg dest, TCGReg a1,
                     TCGArg a2, int a2const)
{
    /* Since this sets both R and R+1, we have no choice but to store the
       result into R0, allowing R1 == TCG_TMP0 to be clobbered as well.  */
    QEMU_BUILD_BUG_ON(TCG_TMP0 != TCG_REG_R1);
    tcg_out_insn(s, RRE, FLOGR, TCG_REG_R0, a1);

    if (a2const && a2 == 64) {
        tcg_out_mov(s, TCG_TYPE_I64, dest, TCG_REG_R0);
    } else {
        if (a2const) {
            tcg_out_movi(s, TCG_TYPE_I64, dest, a2);
        } else {
            tcg_out_mov(s, TCG_TYPE_I64, dest, a2);
        }
        if (HAVE_FACILITY(LOAD_ON_COND)) {
            /* Emit: if (one bit found) dest = r0.  */
            tcg_out_insn(s, RRF, LOCGR, dest, TCG_REG_R0, 2);
        } else {
            /* Emit: if (no one bit found) goto over; dest = r0; over:  */
            tcg_out_insn(s, RI, BRC, 8, (4 + 4) >> 1);
            tcg_out_insn(s, RRE, LGR, dest, TCG_REG_R0);
        }
    }
}

static void tgen_deposit(TCGContext *s, TCGReg dest, TCGReg src,
                         int ofs, int len, int z)
{
    int lsb = (63 - ofs);
    int msb = lsb - (len - 1);
    tcg_out_risbg(s, dest, src, msb, lsb, ofs, z);
}
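
/*
 * Example (illustrative): a deposit at ofs = 8 of len = 16 bits maps
 * to lsb = 63 - 8 = 55 and msb = 55 - 15 = 40, i.e. RISBG selects bits
 * 40..55 and rotates the source left by 8 to place them.
 */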

static void tgen_extract(TCGContext *s, TCGReg dest, TCGReg src,
                         int ofs, int len)
{
    tcg_out_risbg(s, dest, src, 64 - len, 63, 64 - ofs, 1);
}

static void tgen_gotoi(TCGContext *s, int cc, const tcg_insn_unit *dest)
{
    ptrdiff_t off = tcg_pcrel_diff(s, dest) >> 1;
    if (off == (int16_t)off) {
        tcg_out_insn(s, RI, BRC, cc, off);
    } else if (off == (int32_t)off) {
        tcg_out_insn(s, RIL, BRCL, cc, off);
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, (uintptr_t)dest);
        tcg_out_insn(s, RR, BCR, cc, TCG_TMP0);
    }
}

static void tgen_branch(TCGContext *s, int cc, TCGLabel *l)
{
    if (l->has_value) {
        tgen_gotoi(s, cc, l->u.value_ptr);
    } else if (USE_LONG_BRANCHES) {
        tcg_out16(s, RIL_BRCL | (cc << 4));
        tcg_out_reloc(s, s->code_ptr, R_390_PC32DBL, l, 2);
        s->code_ptr += 2;
    } else {
        tcg_out16(s, RI_BRC | (cc << 4));
        tcg_out_reloc(s, s->code_ptr, R_390_PC16DBL, l, 2);
        s->code_ptr += 1;
    }
}

static void tgen_compare_branch(TCGContext *s, S390Opcode opc, int cc,
                                TCGReg r1, TCGReg r2, TCGLabel *l)
{
    tcg_out_reloc(s, s->code_ptr + 1, R_390_PC16DBL, l, 2);
    tcg_out16(s, (opc & 0xff00) | (r1 << 4) | r2);
    tcg_out16(s, 0);
    tcg_out16(s, cc << 12 | (opc & 0xff));
}

static void tgen_compare_imm_branch(TCGContext *s, S390Opcode opc, int cc,
                                    TCGReg r1, int i2, TCGLabel *l)
{
    tcg_out_reloc(s, s->code_ptr + 1, R_390_PC16DBL, l, 2);
    tcg_out16(s, (opc & 0xff00) | (r1 << 4) | cc);
    tcg_out16(s, 0);
    tcg_out16(s, (i2 << 8) | (opc & 0xff));
}

static void tgen_brcond(TCGContext *s, TCGType type, TCGCond c,
                        TCGReg r1, TCGArg c2, int c2const, TCGLabel *l)
{
    int cc;

    if (HAVE_FACILITY(GEN_INST_EXT)) {
        bool is_unsigned = is_unsigned_cond(c);
        bool in_range;
        S390Opcode opc;

        cc = tcg_cond_to_s390_cond[c];

        if (!c2const) {
            opc = (type == TCG_TYPE_I32
                   ? (is_unsigned ? RIE_CLRJ : RIE_CRJ)
                   : (is_unsigned ? RIE_CLGRJ : RIE_CGRJ));
            tgen_compare_branch(s, opc, cc, r1, c2, l);
            return;
        }

        /* COMPARE IMMEDIATE AND BRANCH RELATIVE has an 8-bit immediate field.
           If the immediate we've been given does not fit that range, we'll
           fall back to separate compare and branch instructions using the
           larger comparison range afforded by COMPARE IMMEDIATE.  */
        if (type == TCG_TYPE_I32) {
            if (is_unsigned) {
                opc = RIE_CLIJ;
                in_range = (uint32_t)c2 == (uint8_t)c2;
            } else {
                opc = RIE_CIJ;
                in_range = (int32_t)c2 == (int8_t)c2;
            }
        } else {
            if (is_unsigned) {
                opc = RIE_CLGIJ;
                in_range = (uint64_t)c2 == (uint8_t)c2;
            } else {
                opc = RIE_CGIJ;
                in_range = (int64_t)c2 == (int8_t)c2;
            }
        }
        if (in_range) {
            tgen_compare_imm_branch(s, opc, cc, r1, c2, l);
            return;
        }
    }

    cc = tgen_cmp(s, type, c, r1, c2, c2const, false);
    tgen_branch(s, cc, l);
}

static void tcg_out_call(TCGContext *s, const tcg_insn_unit *dest)
{
    ptrdiff_t off = tcg_pcrel_diff(s, dest) >> 1;
    if (off == (int32_t)off) {
        tcg_out_insn(s, RIL, BRASL, TCG_REG_R14, off);
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, (uintptr_t)dest);
        tcg_out_insn(s, RR, BASR, TCG_REG_R14, TCG_TMP0);
    }
}

static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc, TCGReg data,
                                   TCGReg base, TCGReg index, int disp)
{
    switch (opc & (MO_SSIZE | MO_BSWAP)) {
    case MO_UB:
        tcg_out_insn(s, RXY, LLGC, data, base, index, disp);
        break;
    case MO_SB:
        tcg_out_insn(s, RXY, LGB, data, base, index, disp);
        break;

    case MO_UW | MO_BSWAP:
        /* swapped unsigned halfword load with upper bits zeroed */
        tcg_out_insn(s, RXY, LRVH, data, base, index, disp);
        tgen_ext16u(s, TCG_TYPE_I64, data, data);
        break;
    case MO_UW:
        tcg_out_insn(s, RXY, LLGH, data, base, index, disp);
        break;

    case MO_SW | MO_BSWAP:
        /* swapped sign-extended halfword load */
        tcg_out_insn(s, RXY, LRVH, data, base, index, disp);
        tgen_ext16s(s, TCG_TYPE_I64, data, data);
        break;
    case MO_SW:
        tcg_out_insn(s, RXY, LGH, data, base, index, disp);
        break;

    case MO_UL | MO_BSWAP:
        /* swapped unsigned int load with upper bits zeroed */
        tcg_out_insn(s, RXY, LRV, data, base, index, disp);
        tgen_ext32u(s, data, data);
        break;
    case MO_UL:
        tcg_out_insn(s, RXY, LLGF, data, base, index, disp);
        break;

    case MO_SL | MO_BSWAP:
        /* swapped sign-extended int load */
        tcg_out_insn(s, RXY, LRV, data, base, index, disp);
        tgen_ext32s(s, data, data);
        break;
    case MO_SL:
        tcg_out_insn(s, RXY, LGF, data, base, index, disp);
        break;

    case MO_UQ | MO_BSWAP:
        tcg_out_insn(s, RXY, LRVG, data, base, index, disp);
        break;
    case MO_UQ:
        tcg_out_insn(s, RXY, LG, data, base, index, disp);
        break;

    default:
        tcg_abort();
    }
}

static void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc, TCGReg data,
                                   TCGReg base, TCGReg index, int disp)
{
    switch (opc & (MO_SIZE | MO_BSWAP)) {
    case MO_UB:
        if (disp >= 0 && disp < 0x1000) {
            tcg_out_insn(s, RX, STC, data, base, index, disp);
        } else {
            tcg_out_insn(s, RXY, STCY, data, base, index, disp);
        }
        break;

    case MO_UW | MO_BSWAP:
        tcg_out_insn(s, RXY, STRVH, data, base, index, disp);
        break;
    case MO_UW:
        if (disp >= 0 && disp < 0x1000) {
            tcg_out_insn(s, RX, STH, data, base, index, disp);
        } else {
            tcg_out_insn(s, RXY, STHY, data, base, index, disp);
        }
        break;

    case MO_UL | MO_BSWAP:
        tcg_out_insn(s, RXY, STRV, data, base, index, disp);
        break;
    case MO_UL:
        if (disp >= 0 && disp < 0x1000) {
            tcg_out_insn(s, RX, ST, data, base, index, disp);
        } else {
            tcg_out_insn(s, RXY, STY, data, base, index, disp);
        }
        break;

    case MO_UQ | MO_BSWAP:
        tcg_out_insn(s, RXY, STRVG, data, base, index, disp);
        break;
    case MO_UQ:
        tcg_out_insn(s, RXY, STG, data, base, index, disp);
        break;

    default:
        tcg_abort();
    }
}

#if defined(CONFIG_SOFTMMU)
/* We're expecting to use a 20-bit negative offset on the tlb memory ops.  */
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 19));

/* Load and compare a TLB entry, leaving the flags set.  Loads the TLB
   addend into R2.  Returns a register with the sanitized guest address.  */
static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addr_reg, MemOp opc,
                               int mem_index, bool is_ld)
{
    unsigned s_bits = opc & MO_SIZE;
    unsigned a_bits = get_alignment_bits(opc);
    unsigned s_mask = (1 << s_bits) - 1;
    unsigned a_mask = (1 << a_bits) - 1;
    int fast_off = TLB_MASK_TABLE_OFS(mem_index);
    int mask_off = fast_off + offsetof(CPUTLBDescFast, mask);
    int table_off = fast_off + offsetof(CPUTLBDescFast, table);
    int ofs, a_off;
    uint64_t tlb_mask;

    tcg_out_sh64(s, RSY_SRLG, TCG_REG_R2, addr_reg, TCG_REG_NONE,
                 TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
    tcg_out_insn(s, RXY, NG, TCG_REG_R2, TCG_AREG0, TCG_REG_NONE, mask_off);
    tcg_out_insn(s, RXY, AG, TCG_REG_R2, TCG_AREG0, TCG_REG_NONE, table_off);

    /* For aligned accesses, we check the first byte and include the alignment
       bits within the address.  For unaligned access, we check that we don't
       cross pages using the address of the last byte of the access.  */
    a_off = (a_bits >= s_bits ? 0 : s_mask - a_mask);
    tlb_mask = (uint64_t)TARGET_PAGE_MASK | a_mask;
    if (HAVE_FACILITY(GEN_INST_EXT) && a_off == 0) {
        tgen_andi_risbg(s, TCG_REG_R3, addr_reg, tlb_mask);
    } else {
        tcg_out_insn(s, RX, LA, TCG_REG_R3, addr_reg, TCG_REG_NONE, a_off);
        tgen_andi(s, TCG_TYPE_TL, TCG_REG_R3, tlb_mask);
    }

    if (is_ld) {
        ofs = offsetof(CPUTLBEntry, addr_read);
    } else {
        ofs = offsetof(CPUTLBEntry, addr_write);
    }
    if (TARGET_LONG_BITS == 32) {
        tcg_out_insn(s, RX, C, TCG_REG_R3, TCG_REG_R2, TCG_REG_NONE, ofs);
    } else {
        tcg_out_insn(s, RXY, CG, TCG_REG_R3, TCG_REG_R2, TCG_REG_NONE, ofs);
    }

    tcg_out_insn(s, RXY, LG, TCG_REG_R2, TCG_REG_R2, TCG_REG_NONE,
                 offsetof(CPUTLBEntry, addend));

    if (TARGET_LONG_BITS == 32) {
        tgen_ext32u(s, TCG_REG_R3, addr_reg);
        return TCG_REG_R3;
    }
    return addr_reg;
}
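
/*
 * Example (illustrative): an 8-byte access with 1-byte alignment gives
 * s_mask = 7 and a_mask = 0, so a_off = 7 and the comparison uses the
 * address of the last byte of the access; a naturally aligned access
 * gives a_off = 0 and folds a_mask into tlb_mask instead.
 */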
|
|
|
|
static void add_qemu_ldst_label(TCGContext *s, bool is_ld, MemOpIdx oi,
|
|
TCGReg data, TCGReg addr,
|
|
tcg_insn_unit *raddr, tcg_insn_unit *label_ptr)
|
|
{
|
|
TCGLabelQemuLdst *label = new_ldst_label(s);
|
|
|
|
label->is_ld = is_ld;
|
|
label->oi = oi;
|
|
label->datalo_reg = data;
|
|
label->addrlo_reg = addr;
|
|
label->raddr = tcg_splitwx_to_rx(raddr);
|
|
label->label_ptr[0] = label_ptr;
|
|
}
|
|
|
|
static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
|
|
{
|
|
TCGReg addr_reg = lb->addrlo_reg;
|
|
TCGReg data_reg = lb->datalo_reg;
|
|
MemOpIdx oi = lb->oi;
|
|
MemOp opc = get_memop(oi);
|
|
|
|
if (!patch_reloc(lb->label_ptr[0], R_390_PC16DBL,
|
|
(intptr_t)tcg_splitwx_to_rx(s->code_ptr), 2)) {
|
|
return false;
|
|
}
|
|
|
|
tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_R2, TCG_AREG0);
|
|
if (TARGET_LONG_BITS == 64) {
|
|
tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_R3, addr_reg);
|
|
}
|
|
tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R4, oi);
|
|
tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R5, (uintptr_t)lb->raddr);
|
|
tcg_out_call(s, qemu_ld_helpers[opc & (MO_BSWAP | MO_SSIZE)]);
|
|
tcg_out_mov(s, TCG_TYPE_I64, data_reg, TCG_REG_R2);
|
|
|
|
tgen_gotoi(s, S390_CC_ALWAYS, lb->raddr);
|
|
return true;
|
|
}
|
|
|
|
static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
|
|
{
|
|
TCGReg addr_reg = lb->addrlo_reg;
|
|
TCGReg data_reg = lb->datalo_reg;
|
|
MemOpIdx oi = lb->oi;
|
|
MemOp opc = get_memop(oi);
|
|
|
|
if (!patch_reloc(lb->label_ptr[0], R_390_PC16DBL,
|
|
(intptr_t)tcg_splitwx_to_rx(s->code_ptr), 2)) {
|
|
return false;
|
|
}
|
|
|
|
tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_R2, TCG_AREG0);
|
|
if (TARGET_LONG_BITS == 64) {
|
|
tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_R3, addr_reg);
|
|
}
|
|
switch (opc & MO_SIZE) {
|
|
case MO_UB:
|
|
tgen_ext8u(s, TCG_TYPE_I64, TCG_REG_R4, data_reg);
|
|
break;
|
|
case MO_UW:
|
|
tgen_ext16u(s, TCG_TYPE_I64, TCG_REG_R4, data_reg);
|
|
break;
|
|
case MO_UL:
|
|
tgen_ext32u(s, TCG_REG_R4, data_reg);
|
|
break;
|
|
case MO_UQ:
|
|
tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_R4, data_reg);
|
|
break;
|
|
default:
|
|
tcg_abort();
|
|
}
|
|
tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R5, oi);
|
|
tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R6, (uintptr_t)lb->raddr);
|
|
tcg_out_call(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]);
|
|
|
|
tgen_gotoi(s, S390_CC_ALWAYS, lb->raddr);
|
|
return true;
|
|
}
|
|
#else
|
|
static void tcg_out_test_alignment(TCGContext *s, bool is_ld,
                                   TCGReg addrlo, unsigned a_bits)
{
    unsigned a_mask = (1 << a_bits) - 1;
    TCGLabelQemuLdst *l = new_ldst_label(s);

    l->is_ld = is_ld;
    l->addrlo_reg = addrlo;

    /* We are expecting a_bits to max out at 7, much lower than TMLL. */
    tcg_debug_assert(a_bits < 16);
    tcg_out_insn(s, RI, TMLL, addrlo, a_mask);

    tcg_out16(s, RI_BRC | (7 << 4)); /* CC in {1,2,3} */
    l->label_ptr[0] = s->code_ptr;
    s->code_ptr += 1;

    l->raddr = tcg_splitwx_to_rx(s->code_ptr);
}

static bool tcg_out_fail_alignment(TCGContext *s, TCGLabelQemuLdst *l)
{
    if (!patch_reloc(l->label_ptr[0], R_390_PC16DBL,
                     (intptr_t)tcg_splitwx_to_rx(s->code_ptr), 2)) {
        return false;
    }

    tcg_out_mov(s, TCG_TYPE_TL, TCG_REG_R3, l->addrlo_reg);
    tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_R2, TCG_AREG0);

    /* "Tail call" to the helper, with the return address back inline. */
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R14, (uintptr_t)l->raddr);
    tgen_gotoi(s, S390_CC_ALWAYS, (const void *)(l->is_ld ? helper_unaligned_ld
                                                 : helper_unaligned_st));
    return true;
}

static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    return tcg_out_fail_alignment(s, l);
}

static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    return tcg_out_fail_alignment(s, l);
}

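/*
 * User-only: fold guest_base into the addressing mode.  A base below
 * 0x80000 fits the positive half of a signed 20-bit long displacement
 * and can be encoded directly; larger bases live in TCG_GUEST_BASE_REG
 * and are applied as an index register.
 */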
static void tcg_prepare_user_ldst(TCGContext *s, TCGReg *addr_reg,
                                  TCGReg *index_reg, tcg_target_long *disp)
{
    if (TARGET_LONG_BITS == 32) {
        tgen_ext32u(s, TCG_TMP0, *addr_reg);
        *addr_reg = TCG_TMP0;
    }
    if (guest_base < 0x80000) {
        *index_reg = TCG_REG_NONE;
        *disp = guest_base;
    } else {
        *index_reg = TCG_GUEST_BASE_REG;
        *disp = 0;
    }
}
#endif /* CONFIG_SOFTMMU */

static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
                            MemOpIdx oi)
{
    MemOp opc = get_memop(oi);
#ifdef CONFIG_SOFTMMU
    unsigned mem_index = get_mmuidx(oi);
    tcg_insn_unit *label_ptr;
    TCGReg base_reg;

    base_reg = tcg_out_tlb_read(s, addr_reg, opc, mem_index, 1);

    tcg_out16(s, RI_BRC | (S390_CC_NE << 4));
    label_ptr = s->code_ptr;
    s->code_ptr += 1;

    tcg_out_qemu_ld_direct(s, opc, data_reg, base_reg, TCG_REG_R2, 0);

    add_qemu_ldst_label(s, 1, oi, data_reg, addr_reg, s->code_ptr, label_ptr);
#else
    TCGReg index_reg;
    tcg_target_long disp;
    unsigned a_bits = get_alignment_bits(opc);

    if (a_bits) {
        tcg_out_test_alignment(s, true, addr_reg, a_bits);
    }
    tcg_prepare_user_ldst(s, &addr_reg, &index_reg, &disp);
    tcg_out_qemu_ld_direct(s, opc, data_reg, addr_reg, index_reg, disp);
#endif
}

static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
                            MemOpIdx oi)
{
    MemOp opc = get_memop(oi);
#ifdef CONFIG_SOFTMMU
    unsigned mem_index = get_mmuidx(oi);
    tcg_insn_unit *label_ptr;
    TCGReg base_reg;

    base_reg = tcg_out_tlb_read(s, addr_reg, opc, mem_index, 0);

    tcg_out16(s, RI_BRC | (S390_CC_NE << 4));
    label_ptr = s->code_ptr;
    s->code_ptr += 1;

    tcg_out_qemu_st_direct(s, opc, data_reg, base_reg, TCG_REG_R2, 0);

    add_qemu_ldst_label(s, 0, oi, data_reg, addr_reg, s->code_ptr, label_ptr);
#else
    TCGReg index_reg;
    tcg_target_long disp;
    unsigned a_bits = get_alignment_bits(opc);

    if (a_bits) {
        tcg_out_test_alignment(s, false, addr_reg, a_bits);
    }
    tcg_prepare_user_ldst(s, &addr_reg, &index_reg, &disp);
    tcg_out_qemu_st_direct(s, opc, data_reg, addr_reg, index_reg, disp);
#endif
}

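/* Expand an opcode name into both its _i32 and _i64 case labels. */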
# define OP_32_64(x) \
        case glue(glue(INDEX_op_,x),_i32): \
        case glue(glue(INDEX_op_,x),_i64)

static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
                              const TCGArg args[TCG_MAX_OP_ARGS],
                              const int const_args[TCG_MAX_OP_ARGS])
{
    S390Opcode op, op2;
    TCGArg a0, a1, a2;

    switch (opc) {
    case INDEX_op_exit_tb:
        /* Reuse the zeroing that exists for goto_ptr. */
        a0 = args[0];
        if (a0 == 0) {
            tgen_gotoi(s, S390_CC_ALWAYS, tcg_code_gen_epilogue);
        } else {
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R2, a0);
            tgen_gotoi(s, S390_CC_ALWAYS, tb_ret_addr);
        }
        break;

    case INDEX_op_goto_tb:
        a0 = args[0];
        if (s->tb_jmp_insn_offset) {
            /*
             * branch displacement must be aligned for atomic patching;
             * see if we need to add extra nop before branch
             */
            if (!QEMU_PTR_IS_ALIGNED(s->code_ptr + 1, 4)) {
                tcg_out16(s, NOP);
            }
            tcg_debug_assert(!USE_REG_TB);
            tcg_out16(s, RIL_BRCL | (S390_CC_ALWAYS << 4));
            s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
            s->code_ptr += 2;
        } else {
            /* load address stored at s->tb_jmp_target_addr + a0 */
            tcg_out_ld_abs(s, TCG_TYPE_PTR, TCG_REG_TB,
                           tcg_splitwx_to_rx(s->tb_jmp_target_addr + a0));
            /* and go there */
            tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, TCG_REG_TB);
        }
        set_jmp_reset_offset(s, a0);

        /* For the unlinked path of goto_tb, we need to reset
           TCG_REG_TB to the beginning of this TB. */
        if (USE_REG_TB) {
            int ofs = -tcg_current_code_size(s);
            /* All TB are restricted to 64KiB by unwind info. */
            tcg_debug_assert(ofs == sextract64(ofs, 0, 20));
            tcg_out_insn(s, RXY, LAY, TCG_REG_TB,
                         TCG_REG_TB, TCG_REG_NONE, ofs);
        }
        break;

    case INDEX_op_goto_ptr:
        a0 = args[0];
        if (USE_REG_TB) {
            tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_TB, a0);
        }
        tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, a0);
        break;

    OP_32_64(ld8u):
        /* ??? LLC (RXY format) is only present with the extended-immediate
           facility, whereas LLGC is always present. */
        tcg_out_mem(s, 0, RXY_LLGC, args[0], args[1], TCG_REG_NONE, args[2]);
        break;

    OP_32_64(ld8s):
        /* ??? LB is no smaller than LGB, so no point to using it. */
        tcg_out_mem(s, 0, RXY_LGB, args[0], args[1], TCG_REG_NONE, args[2]);
        break;

    OP_32_64(ld16u):
        /* ??? LLH (RXY format) is only present with the extended-immediate
           facility, whereas LLGH is always present. */
        tcg_out_mem(s, 0, RXY_LLGH, args[0], args[1], TCG_REG_NONE, args[2]);
        break;

    case INDEX_op_ld16s_i32:
        tcg_out_mem(s, RX_LH, RXY_LHY, args[0], args[1], TCG_REG_NONE, args[2]);
        break;

    case INDEX_op_ld_i32:
        tcg_out_ld(s, TCG_TYPE_I32, args[0], args[1], args[2]);
        break;

    OP_32_64(st8):
        tcg_out_mem(s, RX_STC, RXY_STCY, args[0], args[1],
                    TCG_REG_NONE, args[2]);
        break;

    OP_32_64(st16):
        tcg_out_mem(s, RX_STH, RXY_STHY, args[0], args[1],
                    TCG_REG_NONE, args[2]);
        break;

    case INDEX_op_st_i32:
        tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
        break;

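    /*
     * Note that three-operand register adds below are implemented with
     * LOAD ADDRESS (LA), which sums base, index and displacement without
     * clobbering either input or the condition code.
     */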
    case INDEX_op_add_i32:
        a0 = args[0], a1 = args[1], a2 = (int32_t)args[2];
        if (const_args[2]) {
        do_addi_32:
            if (a0 == a1) {
                if (a2 == (int16_t)a2) {
                    tcg_out_insn(s, RI, AHI, a0, a2);
                    break;
                }
                if (HAVE_FACILITY(EXT_IMM)) {
                    tcg_out_insn(s, RIL, AFI, a0, a2);
                    break;
                }
            }
            tcg_out_mem(s, RX_LA, RXY_LAY, a0, a1, TCG_REG_NONE, a2);
        } else if (a0 == a1) {
            tcg_out_insn(s, RR, AR, a0, a2);
        } else {
            tcg_out_insn(s, RX, LA, a0, a1, a2, 0);
        }
        break;
    case INDEX_op_sub_i32:
        a0 = args[0], a1 = args[1], a2 = (int32_t)args[2];
        if (const_args[2]) {
            a2 = -a2;
            goto do_addi_32;
        } else if (a0 == a1) {
            tcg_out_insn(s, RR, SR, a0, a2);
        } else {
            tcg_out_insn(s, RRF, SRK, a0, a1, a2);
        }
        break;

    case INDEX_op_and_i32:
        a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2];
        if (const_args[2]) {
            tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
            tgen_andi(s, TCG_TYPE_I32, a0, a2);
        } else if (a0 == a1) {
            tcg_out_insn(s, RR, NR, a0, a2);
        } else {
            tcg_out_insn(s, RRF, NRK, a0, a1, a2);
        }
        break;
    case INDEX_op_or_i32:
        a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2];
        if (const_args[2]) {
            tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
            tgen_ori(s, TCG_TYPE_I32, a0, a2);
        } else if (a0 == a1) {
            tcg_out_insn(s, RR, OR, a0, a2);
        } else {
            tcg_out_insn(s, RRF, ORK, a0, a1, a2);
        }
        break;
    case INDEX_op_xor_i32:
        a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2];
        if (const_args[2]) {
            tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
            tgen_xori(s, TCG_TYPE_I32, a0, a2);
        } else if (a0 == a1) {
            tcg_out_insn(s, RR, XR, a0, a2);
        } else {
            tcg_out_insn(s, RRF, XRK, a0, a1, a2);
        }
        break;

    case INDEX_op_neg_i32:
        tcg_out_insn(s, RR, LCR, args[0], args[1]);
        break;

    case INDEX_op_mul_i32:
        if (const_args[2]) {
            if ((int32_t)args[2] == (int16_t)args[2]) {
                tcg_out_insn(s, RI, MHI, args[0], args[2]);
            } else {
                tcg_out_insn(s, RIL, MSFI, args[0], args[2]);
            }
        } else {
            tcg_out_insn(s, RRE, MSR, args[0], args[2]);
        }
        break;

    case INDEX_op_div2_i32:
        tcg_out_insn(s, RR, DR, TCG_REG_R2, args[4]);
        break;
    case INDEX_op_divu2_i32:
        tcg_out_insn(s, RRE, DLR, TCG_REG_R2, args[4]);
        break;

    case INDEX_op_shl_i32:
        op = RS_SLL;
        op2 = RSY_SLLK;
    do_shift32:
        a0 = args[0], a1 = args[1], a2 = (int32_t)args[2];
        if (a0 == a1) {
            if (const_args[2]) {
                tcg_out_sh32(s, op, a0, TCG_REG_NONE, a2);
            } else {
                tcg_out_sh32(s, op, a0, a2, 0);
            }
        } else {
            /* Using tcg_out_sh64 here for the format; it is a 32-bit shift. */
            if (const_args[2]) {
                tcg_out_sh64(s, op2, a0, a1, TCG_REG_NONE, a2);
            } else {
                tcg_out_sh64(s, op2, a0, a1, a2, 0);
            }
        }
        break;
    case INDEX_op_shr_i32:
        op = RS_SRL;
        op2 = RSY_SRLK;
        goto do_shift32;
    case INDEX_op_sar_i32:
        op = RS_SRA;
        op2 = RSY_SRAK;
        goto do_shift32;

    case INDEX_op_rotl_i32:
        /* ??? Using tcg_out_sh64 here for the format; it is a 32-bit rol. */
        if (const_args[2]) {
            tcg_out_sh64(s, RSY_RLL, args[0], args[1], TCG_REG_NONE, args[2]);
        } else {
            tcg_out_sh64(s, RSY_RLL, args[0], args[1], args[2], 0);
        }
        break;
    case INDEX_op_rotr_i32:
        if (const_args[2]) {
            tcg_out_sh64(s, RSY_RLL, args[0], args[1],
                         TCG_REG_NONE, (32 - args[2]) & 31);
        } else {
            tcg_out_insn(s, RR, LCR, TCG_TMP0, args[2]);
            tcg_out_sh64(s, RSY_RLL, args[0], args[1], TCG_TMP0, 0);
        }
        break;

    case INDEX_op_ext8s_i32:
        tgen_ext8s(s, TCG_TYPE_I32, args[0], args[1]);
        break;
    case INDEX_op_ext16s_i32:
        tgen_ext16s(s, TCG_TYPE_I32, args[0], args[1]);
        break;
    case INDEX_op_ext8u_i32:
        tgen_ext8u(s, TCG_TYPE_I32, args[0], args[1]);
        break;
    case INDEX_op_ext16u_i32:
        tgen_ext16u(s, TCG_TYPE_I32, args[0], args[1]);
        break;

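    /*
     * Byte-swapped 16-bit values are produced by reversing the whole
     * 32- or 64-bit register (LRVR/LRVGR) and shifting back down;
     * an arithmetic shift yields the sign-extending (TCG_BSWAP_OS)
     * variant for free.
     */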
    case INDEX_op_bswap16_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        tcg_out_insn(s, RRE, LRVR, a0, a1);
        if (a2 & TCG_BSWAP_OS) {
            tcg_out_sh32(s, RS_SRA, a0, TCG_REG_NONE, 16);
        } else {
            tcg_out_sh32(s, RS_SRL, a0, TCG_REG_NONE, 16);
        }
        break;
    case INDEX_op_bswap16_i64:
        a0 = args[0], a1 = args[1], a2 = args[2];
        tcg_out_insn(s, RRE, LRVGR, a0, a1);
        if (a2 & TCG_BSWAP_OS) {
            tcg_out_sh64(s, RSY_SRAG, a0, a0, TCG_REG_NONE, 48);
        } else {
            tcg_out_sh64(s, RSY_SRLG, a0, a0, TCG_REG_NONE, 48);
        }
        break;

    case INDEX_op_bswap32_i32:
        tcg_out_insn(s, RRE, LRVR, args[0], args[1]);
        break;
    case INDEX_op_bswap32_i64:
        a0 = args[0], a1 = args[1], a2 = args[2];
        tcg_out_insn(s, RRE, LRVR, a0, a1);
        if (a2 & TCG_BSWAP_OS) {
            tgen_ext32s(s, a0, a0);
        } else if ((a2 & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
            tgen_ext32u(s, a0, a0);
        }
        break;

    case INDEX_op_add2_i32:
        if (const_args[4]) {
            tcg_out_insn(s, RIL, ALFI, args[0], args[4]);
        } else {
            tcg_out_insn(s, RR, ALR, args[0], args[4]);
        }
        tcg_out_insn(s, RRE, ALCR, args[1], args[5]);
        break;
    case INDEX_op_sub2_i32:
        if (const_args[4]) {
            tcg_out_insn(s, RIL, SLFI, args[0], args[4]);
        } else {
            tcg_out_insn(s, RR, SLR, args[0], args[4]);
        }
        tcg_out_insn(s, RRE, SLBR, args[1], args[5]);
        break;

    case INDEX_op_br:
        tgen_branch(s, S390_CC_ALWAYS, arg_label(args[0]));
        break;

    case INDEX_op_brcond_i32:
        tgen_brcond(s, TCG_TYPE_I32, args[2], args[0],
                    args[1], const_args[1], arg_label(args[3]));
        break;
    case INDEX_op_setcond_i32:
        tgen_setcond(s, TCG_TYPE_I32, args[3], args[0], args[1],
                     args[2], const_args[2]);
        break;
    case INDEX_op_movcond_i32:
        tgen_movcond(s, TCG_TYPE_I32, args[5], args[0], args[1],
                     args[2], const_args[2], args[3], const_args[3]);
        break;

    case INDEX_op_qemu_ld_i32:
        /* ??? Technically we can use a non-extending instruction. */
    case INDEX_op_qemu_ld_i64:
        tcg_out_qemu_ld(s, args[0], args[1], args[2]);
        break;
    case INDEX_op_qemu_st_i32:
    case INDEX_op_qemu_st_i64:
        tcg_out_qemu_st(s, args[0], args[1], args[2]);
        break;

    case INDEX_op_ld16s_i64:
        tcg_out_mem(s, 0, RXY_LGH, args[0], args[1], TCG_REG_NONE, args[2]);
        break;
    case INDEX_op_ld32u_i64:
        tcg_out_mem(s, 0, RXY_LLGF, args[0], args[1], TCG_REG_NONE, args[2]);
        break;
    case INDEX_op_ld32s_i64:
        tcg_out_mem(s, 0, RXY_LGF, args[0], args[1], TCG_REG_NONE, args[2]);
        break;
    case INDEX_op_ld_i64:
        tcg_out_ld(s, TCG_TYPE_I64, args[0], args[1], args[2]);
        break;

    case INDEX_op_st32_i64:
        tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
        break;
    case INDEX_op_st_i64:
        tcg_out_st(s, TCG_TYPE_I64, args[0], args[1], args[2]);
        break;

    case INDEX_op_add_i64:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
        do_addi_64:
            if (a0 == a1) {
                if (a2 == (int16_t)a2) {
                    tcg_out_insn(s, RI, AGHI, a0, a2);
                    break;
                }
                if (HAVE_FACILITY(EXT_IMM)) {
                    if (a2 == (int32_t)a2) {
                        tcg_out_insn(s, RIL, AGFI, a0, a2);
                        break;
                    } else if (a2 == (uint32_t)a2) {
                        tcg_out_insn(s, RIL, ALGFI, a0, a2);
                        break;
                    } else if (-a2 == (uint32_t)-a2) {
                        tcg_out_insn(s, RIL, SLGFI, a0, -a2);
                        break;
                    }
                }
            }
            tcg_out_mem(s, RX_LA, RXY_LAY, a0, a1, TCG_REG_NONE, a2);
        } else if (a0 == a1) {
            tcg_out_insn(s, RRE, AGR, a0, a2);
        } else {
            tcg_out_insn(s, RX, LA, a0, a1, a2, 0);
        }
        break;
    case INDEX_op_sub_i64:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            a2 = -a2;
            goto do_addi_64;
        } else if (a0 == a1) {
            tcg_out_insn(s, RRE, SGR, a0, a2);
        } else {
            tcg_out_insn(s, RRF, SGRK, a0, a1, a2);
        }
        break;

    case INDEX_op_and_i64:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            tcg_out_mov(s, TCG_TYPE_I64, a0, a1);
            tgen_andi(s, TCG_TYPE_I64, a0, a2);
        } else if (a0 == a1) {
            tcg_out_insn(s, RRE, NGR, a0, a2);
        } else {
            tcg_out_insn(s, RRF, NGRK, a0, a1, a2);
        }
        break;
    case INDEX_op_or_i64:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            tcg_out_mov(s, TCG_TYPE_I64, a0, a1);
            tgen_ori(s, TCG_TYPE_I64, a0, a2);
        } else if (a0 == a1) {
            tcg_out_insn(s, RRE, OGR, a0, a2);
        } else {
            tcg_out_insn(s, RRF, OGRK, a0, a1, a2);
        }
        break;
    case INDEX_op_xor_i64:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            tcg_out_mov(s, TCG_TYPE_I64, a0, a1);
            tgen_xori(s, TCG_TYPE_I64, a0, a2);
        } else if (a0 == a1) {
            tcg_out_insn(s, RRE, XGR, a0, a2);
        } else {
            tcg_out_insn(s, RRF, XGRK, a0, a1, a2);
        }
        break;

    case INDEX_op_neg_i64:
        tcg_out_insn(s, RRE, LCGR, args[0], args[1]);
        break;
    case INDEX_op_bswap64_i64:
        tcg_out_insn(s, RRE, LRVGR, args[0], args[1]);
        break;

    case INDEX_op_mul_i64:
        if (const_args[2]) {
            if (args[2] == (int16_t)args[2]) {
                tcg_out_insn(s, RI, MGHI, args[0], args[2]);
            } else {
                tcg_out_insn(s, RIL, MSGFI, args[0], args[2]);
            }
        } else {
            tcg_out_insn(s, RRE, MSGR, args[0], args[2]);
        }
        break;

    case INDEX_op_div2_i64:
        /* ??? We get an unnecessary sign-extension of the dividend
           into R3 with this definition, but as we do in fact always
           produce both quotient and remainder, using INDEX_op_div_i64
           instead requires jumping through even more hoops. */
        tcg_out_insn(s, RRE, DSGR, TCG_REG_R2, args[4]);
        break;
    case INDEX_op_divu2_i64:
        tcg_out_insn(s, RRE, DLGR, TCG_REG_R2, args[4]);
        break;
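    /*
     * MLGR leaves the 128-bit product in an even/odd register pair;
     * the constraints pin the outputs to R2:R3 (see tcg_target_op_def).
     */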
    case INDEX_op_mulu2_i64:
        tcg_out_insn(s, RRE, MLGR, TCG_REG_R2, args[3]);
        break;

    case INDEX_op_shl_i64:
        op = RSY_SLLG;
    do_shift64:
        if (const_args[2]) {
            tcg_out_sh64(s, op, args[0], args[1], TCG_REG_NONE, args[2]);
        } else {
            tcg_out_sh64(s, op, args[0], args[1], args[2], 0);
        }
        break;
    case INDEX_op_shr_i64:
        op = RSY_SRLG;
        goto do_shift64;
    case INDEX_op_sar_i64:
        op = RSY_SRAG;
        goto do_shift64;

    case INDEX_op_rotl_i64:
        if (const_args[2]) {
            tcg_out_sh64(s, RSY_RLLG, args[0], args[1],
                         TCG_REG_NONE, args[2]);
        } else {
            tcg_out_sh64(s, RSY_RLLG, args[0], args[1], args[2], 0);
        }
        break;
    case INDEX_op_rotr_i64:
        if (const_args[2]) {
            tcg_out_sh64(s, RSY_RLLG, args[0], args[1],
                         TCG_REG_NONE, (64 - args[2]) & 63);
        } else {
            /* We can use the smaller 32-bit negate because only the
               low 6 bits are examined for the rotate. */
            tcg_out_insn(s, RR, LCR, TCG_TMP0, args[2]);
            tcg_out_sh64(s, RSY_RLLG, args[0], args[1], TCG_TMP0, 0);
        }
        break;

    case INDEX_op_ext8s_i64:
        tgen_ext8s(s, TCG_TYPE_I64, args[0], args[1]);
        break;
    case INDEX_op_ext16s_i64:
        tgen_ext16s(s, TCG_TYPE_I64, args[0], args[1]);
        break;
    case INDEX_op_ext_i32_i64:
    case INDEX_op_ext32s_i64:
        tgen_ext32s(s, args[0], args[1]);
        break;
    case INDEX_op_ext8u_i64:
        tgen_ext8u(s, TCG_TYPE_I64, args[0], args[1]);
        break;
    case INDEX_op_ext16u_i64:
        tgen_ext16u(s, TCG_TYPE_I64, args[0], args[1]);
        break;
    case INDEX_op_extu_i32_i64:
    case INDEX_op_ext32u_i64:
        tgen_ext32u(s, args[0], args[1]);
        break;

    case INDEX_op_add2_i64:
        if (const_args[4]) {
            if ((int64_t)args[4] >= 0) {
                tcg_out_insn(s, RIL, ALGFI, args[0], args[4]);
            } else {
                tcg_out_insn(s, RIL, SLGFI, args[0], -args[4]);
            }
        } else {
            tcg_out_insn(s, RRE, ALGR, args[0], args[4]);
        }
        tcg_out_insn(s, RRE, ALCGR, args[1], args[5]);
        break;
    case INDEX_op_sub2_i64:
        if (const_args[4]) {
            if ((int64_t)args[4] >= 0) {
                tcg_out_insn(s, RIL, SLGFI, args[0], args[4]);
            } else {
                tcg_out_insn(s, RIL, ALGFI, args[0], -args[4]);
            }
        } else {
            tcg_out_insn(s, RRE, SLGR, args[0], args[4]);
        }
        tcg_out_insn(s, RRE, SLBGR, args[1], args[5]);
        break;

    case INDEX_op_brcond_i64:
        tgen_brcond(s, TCG_TYPE_I64, args[2], args[0],
                    args[1], const_args[1], arg_label(args[3]));
        break;
    case INDEX_op_setcond_i64:
        tgen_setcond(s, TCG_TYPE_I64, args[3], args[0], args[1],
                     args[2], const_args[2]);
        break;
    case INDEX_op_movcond_i64:
        tgen_movcond(s, TCG_TYPE_I64, args[5], args[0], args[1],
                     args[2], const_args[2], args[3], const_args[3]);
        break;

    OP_32_64(deposit):
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[1]) {
            tgen_deposit(s, a0, a2, args[3], args[4], 1);
        } else {
            /* Since we can't support "0Z" as a constraint, we allow a1 in
               any register.  Fix things up as if a matching constraint. */
            if (a0 != a1) {
                TCGType type = (opc == INDEX_op_deposit_i64);
                if (a0 == a2) {
                    tcg_out_mov(s, type, TCG_TMP0, a2);
                    a2 = TCG_TMP0;
                }
                tcg_out_mov(s, type, a0, a1);
            }
            tgen_deposit(s, a0, a2, args[3], args[4], 0);
        }
        break;

    OP_32_64(extract):
        tgen_extract(s, args[0], args[1], args[2], args[3]);
        break;

    case INDEX_op_clz_i64:
        tgen_clz(s, args[0], args[1], args[2], const_args[2]);
        break;

    case INDEX_op_mb:
        /* The host memory model is quite strong, we simply need to
           serialize the instruction stream. */
        if (args[0] & TCG_MO_ST_LD) {
            tcg_out_insn(s, RR, BCR, HAVE_FACILITY(FAST_BCR_SER) ? 14 : 15, 0);
        }
        break;

    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov. */
    case INDEX_op_mov_i64:
    case INDEX_op_call:     /* Always emitted via tcg_out_call. */
    default:
        tcg_abort();
    }
}

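/*
 * Duplicate a scalar into every element of a vector register.  A general
 * register source is first copied into both doublewords with VLVGP; for
 * smaller element sizes, the standard element is then replicated from the
 * vector register with VREP.
 */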
static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
                            TCGReg dst, TCGReg src)
{
    if (is_general_reg(src)) {
        /* Replicate general register into two MO_64. */
        tcg_out_insn(s, VRRf, VLVGP, dst, src, src);
        if (vece == MO_64) {
            return true;
        }
        src = dst;
    }

    /*
     * Recall that the "standard" integer, within a vector, is the
     * rightmost element of the leftmost doubleword, a-la VLLEZ.
     */
    tcg_out_insn(s, VRIc, VREP, dst, (8 >> vece) - 1, src, vece);
    return true;
}

static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
                             TCGReg dst, TCGReg base, intptr_t offset)
{
    tcg_out_vrx_mem(s, VRX_VLREP, dst, base, TCG_REG_NONE, offset, vece);
    return true;
}

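/*
 * Materialize a replicated constant, preferring forms that avoid memory:
 * a 16-bit signed immediate per element (VREPI), a contiguous (possibly
 * wrapping) bit mask (VGM), an all-0x00/0xff byte pattern (VGBM), and
 * finally a constant-pool load replicated with VLREP.
 */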
static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
                             TCGReg dst, int64_t val)
{
    int i, mask, msb, lsb;

    /* Look for int16_t elements. */
    if (vece <= MO_16 ||
        (vece == MO_32 ? (int32_t)val : val) == (int16_t)val) {
        tcg_out_insn(s, VRIa, VREPI, dst, val, vece);
        return;
    }

    /* Look for bit masks. */
    if (vece == MO_32) {
        if (risbg_mask((int32_t)val)) {
            /* Handle wraparound by swapping msb and lsb. */
            if ((val & 0x80000001u) == 0x80000001u) {
                msb = 32 - ctz32(~val);
                lsb = clz32(~val) - 1;
            } else {
                msb = clz32(val);
                lsb = 31 - ctz32(val);
            }
            tcg_out_insn(s, VRIb, VGM, dst, msb, lsb, MO_32);
            return;
        }
    } else {
        if (risbg_mask(val)) {
            /* Handle wraparound by swapping msb and lsb. */
            if ((val & 0x8000000000000001ull) == 0x8000000000000001ull) {
                msb = 64 - ctz64(~val);
                lsb = clz64(~val) - 1;
            } else {
                msb = clz64(val);
                lsb = 63 - ctz64(val);
            }
            tcg_out_insn(s, VRIb, VGM, dst, msb, lsb, MO_64);
            return;
        }
    }

    /* Look for all bytes 0x00 or 0xff. */
    for (i = mask = 0; i < 8; i++) {
        uint8_t byte = val >> (i * 8);
        if (byte == 0xff) {
            mask |= 1 << i;
        } else if (byte != 0) {
            break;
        }
    }
    if (i == 8) {
        tcg_out_insn(s, VRIa, VGBM, dst, mask * 0x0101, 0);
        return;
    }

    /* Otherwise, stuff it in the constant pool. */
    tcg_out_insn(s, RIL, LARL, TCG_TMP0, 0);
    new_pool_label(s, val, R_390_PC32DBL, s->code_ptr - 2, 2);
    tcg_out_insn(s, VRX, VLREP, dst, TCG_TMP0, TCG_REG_NONE, 0, MO_64);
}

static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
                           unsigned vecl, unsigned vece,
                           const TCGArg args[TCG_MAX_OP_ARGS],
                           const int const_args[TCG_MAX_OP_ARGS])
{
    TCGType type = vecl + TCG_TYPE_V64;
    TCGArg a0 = args[0], a1 = args[1], a2 = args[2];

    switch (opc) {
    case INDEX_op_ld_vec:
        tcg_out_ld(s, type, a0, a1, a2);
        break;
    case INDEX_op_st_vec:
        tcg_out_st(s, type, a0, a1, a2);
        break;
    case INDEX_op_dupm_vec:
        tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
        break;

    case INDEX_op_abs_vec:
        tcg_out_insn(s, VRRa, VLP, a0, a1, vece);
        break;
    case INDEX_op_neg_vec:
        tcg_out_insn(s, VRRa, VLC, a0, a1, vece);
        break;
    case INDEX_op_not_vec:
        tcg_out_insn(s, VRRc, VNO, a0, a1, a1, 0);
        break;

    case INDEX_op_add_vec:
        tcg_out_insn(s, VRRc, VA, a0, a1, a2, vece);
        break;
    case INDEX_op_sub_vec:
        tcg_out_insn(s, VRRc, VS, a0, a1, a2, vece);
        break;
    case INDEX_op_and_vec:
        tcg_out_insn(s, VRRc, VN, a0, a1, a2, 0);
        break;
    case INDEX_op_andc_vec:
        tcg_out_insn(s, VRRc, VNC, a0, a1, a2, 0);
        break;
    case INDEX_op_mul_vec:
        tcg_out_insn(s, VRRc, VML, a0, a1, a2, vece);
        break;
    case INDEX_op_or_vec:
        tcg_out_insn(s, VRRc, VO, a0, a1, a2, 0);
        break;
    case INDEX_op_orc_vec:
        tcg_out_insn(s, VRRc, VOC, a0, a1, a2, 0);
        break;
    case INDEX_op_xor_vec:
        tcg_out_insn(s, VRRc, VX, a0, a1, a2, 0);
        break;
    case INDEX_op_nand_vec:
        tcg_out_insn(s, VRRc, VNN, a0, a1, a2, 0);
        break;
    case INDEX_op_nor_vec:
        tcg_out_insn(s, VRRc, VNO, a0, a1, a2, 0);
        break;
    case INDEX_op_eqv_vec:
        tcg_out_insn(s, VRRc, VNX, a0, a1, a2, 0);
        break;

    case INDEX_op_shli_vec:
        tcg_out_insn(s, VRSa, VESL, a0, a2, TCG_REG_NONE, a1, vece);
        break;
    case INDEX_op_shri_vec:
        tcg_out_insn(s, VRSa, VESRL, a0, a2, TCG_REG_NONE, a1, vece);
        break;
    case INDEX_op_sari_vec:
        tcg_out_insn(s, VRSa, VESRA, a0, a2, TCG_REG_NONE, a1, vece);
        break;
    case INDEX_op_rotli_vec:
        tcg_out_insn(s, VRSa, VERLL, a0, a2, TCG_REG_NONE, a1, vece);
        break;
    case INDEX_op_shls_vec:
        tcg_out_insn(s, VRSa, VESL, a0, 0, a2, a1, vece);
        break;
    case INDEX_op_shrs_vec:
        tcg_out_insn(s, VRSa, VESRL, a0, 0, a2, a1, vece);
        break;
    case INDEX_op_sars_vec:
        tcg_out_insn(s, VRSa, VESRA, a0, 0, a2, a1, vece);
        break;
    case INDEX_op_rotls_vec:
        tcg_out_insn(s, VRSa, VERLL, a0, 0, a2, a1, vece);
        break;
    case INDEX_op_shlv_vec:
        tcg_out_insn(s, VRRc, VESLV, a0, a1, a2, vece);
        break;
    case INDEX_op_shrv_vec:
        tcg_out_insn(s, VRRc, VESRLV, a0, a1, a2, vece);
        break;
    case INDEX_op_sarv_vec:
        tcg_out_insn(s, VRRc, VESRAV, a0, a1, a2, vece);
        break;
    case INDEX_op_rotlv_vec:
        tcg_out_insn(s, VRRc, VERLLV, a0, a1, a2, vece);
        break;

    case INDEX_op_smin_vec:
        tcg_out_insn(s, VRRc, VMN, a0, a1, a2, vece);
        break;
    case INDEX_op_smax_vec:
        tcg_out_insn(s, VRRc, VMX, a0, a1, a2, vece);
        break;
    case INDEX_op_umin_vec:
        tcg_out_insn(s, VRRc, VMNL, a0, a1, a2, vece);
        break;
    case INDEX_op_umax_vec:
        tcg_out_insn(s, VRRc, VMXL, a0, a1, a2, vece);
        break;

    case INDEX_op_bitsel_vec:
        tcg_out_insn(s, VRRe, VSEL, a0, a2, args[3], a1);
        break;

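    /*
     * Only EQ, GT and GTU exist as vector compare instructions; the
     * remaining conditions have already been canonicalized to these
     * by expand_vec_cmp_noinv below.
     */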
    case INDEX_op_cmp_vec:
        switch ((TCGCond)args[3]) {
        case TCG_COND_EQ:
            tcg_out_insn(s, VRRc, VCEQ, a0, a1, a2, vece);
            break;
        case TCG_COND_GT:
            tcg_out_insn(s, VRRc, VCH, a0, a1, a2, vece);
            break;
        case TCG_COND_GTU:
            tcg_out_insn(s, VRRc, VCHL, a0, a1, a2, vece);
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case INDEX_op_s390_vuph_vec:
        tcg_out_insn(s, VRRa, VUPH, a0, a1, vece);
        break;
    case INDEX_op_s390_vupl_vec:
        tcg_out_insn(s, VRRa, VUPL, a0, a1, vece);
        break;
    case INDEX_op_s390_vpks_vec:
        tcg_out_insn(s, VRRc, VPKS, a0, a1, a2, vece);
        break;

    case INDEX_op_mov_vec:  /* Always emitted via tcg_out_mov. */
    case INDEX_op_dup_vec:  /* Always emitted via tcg_out_dup_vec. */
    default:
        g_assert_not_reached();
    }
}

int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
{
    switch (opc) {
    case INDEX_op_abs_vec:
    case INDEX_op_add_vec:
    case INDEX_op_and_vec:
    case INDEX_op_andc_vec:
    case INDEX_op_bitsel_vec:
    case INDEX_op_eqv_vec:
    case INDEX_op_nand_vec:
    case INDEX_op_neg_vec:
    case INDEX_op_nor_vec:
    case INDEX_op_not_vec:
    case INDEX_op_or_vec:
    case INDEX_op_orc_vec:
    case INDEX_op_rotli_vec:
    case INDEX_op_rotls_vec:
    case INDEX_op_rotlv_vec:
    case INDEX_op_sari_vec:
    case INDEX_op_sars_vec:
    case INDEX_op_sarv_vec:
    case INDEX_op_shli_vec:
    case INDEX_op_shls_vec:
    case INDEX_op_shlv_vec:
    case INDEX_op_shri_vec:
    case INDEX_op_shrs_vec:
    case INDEX_op_shrv_vec:
    case INDEX_op_smax_vec:
    case INDEX_op_smin_vec:
    case INDEX_op_sub_vec:
    case INDEX_op_umax_vec:
    case INDEX_op_umin_vec:
    case INDEX_op_xor_vec:
        return 1;
    case INDEX_op_cmp_vec:
    case INDEX_op_cmpsel_vec:
    case INDEX_op_rotrv_vec:
        return -1;
    case INDEX_op_mul_vec:
        return vece < MO_64;
    case INDEX_op_ssadd_vec:
    case INDEX_op_sssub_vec:
        return vece < MO_64 ? -1 : 0;
    default:
        return 0;
    }
}

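/*
 * Emit a vector comparison using only the available EQ/GT/GTU
 * instructions, swapping operands and/or inverting the condition as
 * needed.  Returns true when the caller must still invert the result.
 */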
static bool expand_vec_cmp_noinv(TCGType type, unsigned vece, TCGv_vec v0,
                                 TCGv_vec v1, TCGv_vec v2, TCGCond cond)
{
    bool need_swap = false, need_inv = false;

    switch (cond) {
    case TCG_COND_EQ:
    case TCG_COND_GT:
    case TCG_COND_GTU:
        break;
    case TCG_COND_NE:
    case TCG_COND_LE:
    case TCG_COND_LEU:
        need_inv = true;
        break;
    case TCG_COND_LT:
    case TCG_COND_LTU:
        need_swap = true;
        break;
    case TCG_COND_GE:
    case TCG_COND_GEU:
        need_swap = need_inv = true;
        break;
    default:
        g_assert_not_reached();
    }

    if (need_inv) {
        cond = tcg_invert_cond(cond);
    }
    if (need_swap) {
        TCGv_vec t1;
        t1 = v1, v1 = v2, v2 = t1;
        cond = tcg_swap_cond(cond);
    }

    vec_gen_4(INDEX_op_cmp_vec, type, vece, tcgv_vec_arg(v0),
              tcgv_vec_arg(v1), tcgv_vec_arg(v2), cond);

    return need_inv;
}

static void expand_vec_cmp(TCGType type, unsigned vece, TCGv_vec v0,
                           TCGv_vec v1, TCGv_vec v2, TCGCond cond)
{
    if (expand_vec_cmp_noinv(type, vece, v0, v1, v2, cond)) {
        tcg_gen_not_vec(vece, v0, v0);
    }
}

static void expand_vec_cmpsel(TCGType type, unsigned vece, TCGv_vec v0,
                              TCGv_vec c1, TCGv_vec c2,
                              TCGv_vec v3, TCGv_vec v4, TCGCond cond)
{
    TCGv_vec t = tcg_temp_new_vec(type);

    if (expand_vec_cmp_noinv(type, vece, t, c1, c2, cond)) {
        /* Invert the sense of the compare by swapping arguments. */
        tcg_gen_bitsel_vec(vece, v0, t, v4, v3);
    } else {
        tcg_gen_bitsel_vec(vece, v0, t, v3, v4);
    }
    tcg_temp_free_vec(t);
}

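/*
 * Expand a signed saturating add/sub: widen both inputs to the next
 * larger element size (VUPH/VUPL), perform the operation exactly,
 * then narrow again with the saturating pack (VPKS).
 */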
static void expand_vec_sat(TCGType type, unsigned vece, TCGv_vec v0,
                           TCGv_vec v1, TCGv_vec v2, TCGOpcode add_sub_opc)
{
    TCGv_vec h1 = tcg_temp_new_vec(type);
    TCGv_vec h2 = tcg_temp_new_vec(type);
    TCGv_vec l1 = tcg_temp_new_vec(type);
    TCGv_vec l2 = tcg_temp_new_vec(type);

    tcg_debug_assert(vece < MO_64);

    /* Unpack with sign-extension. */
    vec_gen_2(INDEX_op_s390_vuph_vec, type, vece,
              tcgv_vec_arg(h1), tcgv_vec_arg(v1));
    vec_gen_2(INDEX_op_s390_vuph_vec, type, vece,
              tcgv_vec_arg(h2), tcgv_vec_arg(v2));

    vec_gen_2(INDEX_op_s390_vupl_vec, type, vece,
              tcgv_vec_arg(l1), tcgv_vec_arg(v1));
    vec_gen_2(INDEX_op_s390_vupl_vec, type, vece,
              tcgv_vec_arg(l2), tcgv_vec_arg(v2));

    /* Arithmetic on a wider element size. */
    vec_gen_3(add_sub_opc, type, vece + 1, tcgv_vec_arg(h1),
              tcgv_vec_arg(h1), tcgv_vec_arg(h2));
    vec_gen_3(add_sub_opc, type, vece + 1, tcgv_vec_arg(l1),
              tcgv_vec_arg(l1), tcgv_vec_arg(l2));

    /* Pack with saturation. */
    vec_gen_3(INDEX_op_s390_vpks_vec, type, vece + 1,
              tcgv_vec_arg(v0), tcgv_vec_arg(h1), tcgv_vec_arg(l1));

    tcg_temp_free_vec(h1);
    tcg_temp_free_vec(h2);
    tcg_temp_free_vec(l1);
    tcg_temp_free_vec(l2);
}

void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
                       TCGArg a0, ...)
{
    va_list va;
    TCGv_vec v0, v1, v2, v3, v4, t0;

    va_start(va, a0);
    v0 = temp_tcgv_vec(arg_temp(a0));
    v1 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg)));
    v2 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg)));

    switch (opc) {
    case INDEX_op_cmp_vec:
        expand_vec_cmp(type, vece, v0, v1, v2, va_arg(va, TCGArg));
        break;

    case INDEX_op_cmpsel_vec:
        v3 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg)));
        v4 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg)));
        expand_vec_cmpsel(type, vece, v0, v1, v2, v3, v4, va_arg(va, TCGArg));
        break;

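    /*
     * Rotate right by a variable amount is rotate left by the negated
     * amount, since only the low log2(element bits) of the count are used.
     */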
    case INDEX_op_rotrv_vec:
        t0 = tcg_temp_new_vec(type);
        tcg_gen_neg_vec(vece, t0, v2);
        tcg_gen_rotlv_vec(vece, v0, v1, t0);
        tcg_temp_free_vec(t0);
        break;

    case INDEX_op_ssadd_vec:
        expand_vec_sat(type, vece, v0, v1, v2, INDEX_op_add_vec);
        break;
    case INDEX_op_sssub_vec:
        expand_vec_sat(type, vece, v0, v1, v2, INDEX_op_sub_vec);
        break;

    default:
        g_assert_not_reached();
    }
    va_end(va);
}

static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
{
    switch (op) {
    case INDEX_op_goto_ptr:
        return C_O0_I1(r);

    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld_i32:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i64:
        return C_O1_I1(r, r);

    case INDEX_op_st8_i32:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i32:
    case INDEX_op_st16_i64:
    case INDEX_op_st_i32:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i64:
        return C_O0_I2(r, r);

    case INDEX_op_add_i32:
    case INDEX_op_add_i64:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i64:
    case INDEX_op_rotl_i32:
    case INDEX_op_rotl_i64:
    case INDEX_op_rotr_i32:
    case INDEX_op_rotr_i64:
    case INDEX_op_clz_i64:
    case INDEX_op_setcond_i32:
    case INDEX_op_setcond_i64:
        return C_O1_I2(r, r, ri);

    case INDEX_op_sub_i32:
    case INDEX_op_sub_i64:
    case INDEX_op_and_i32:
    case INDEX_op_and_i64:
    case INDEX_op_or_i32:
    case INDEX_op_or_i64:
    case INDEX_op_xor_i32:
    case INDEX_op_xor_i64:
        return (HAVE_FACILITY(DISTINCT_OPS)
                ? C_O1_I2(r, r, ri)
                : C_O1_I2(r, 0, ri));

    case INDEX_op_mul_i32:
        /* If we have the general-instruction-extensions, then we have
           MULTIPLY SINGLE IMMEDIATE with a signed 32-bit, otherwise we
           have only MULTIPLY HALFWORD IMMEDIATE, with a signed 16-bit. */
        return (HAVE_FACILITY(GEN_INST_EXT)
                ? C_O1_I2(r, 0, ri)
                : C_O1_I2(r, 0, rI));

    case INDEX_op_mul_i64:
        return (HAVE_FACILITY(GEN_INST_EXT)
                ? C_O1_I2(r, 0, rJ)
                : C_O1_I2(r, 0, rI));

    case INDEX_op_shl_i32:
    case INDEX_op_shr_i32:
    case INDEX_op_sar_i32:
        return (HAVE_FACILITY(DISTINCT_OPS)
                ? C_O1_I2(r, r, ri)
                : C_O1_I2(r, 0, ri));

    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        return C_O0_I2(r, ri);

    case INDEX_op_bswap16_i32:
    case INDEX_op_bswap16_i64:
    case INDEX_op_bswap32_i32:
    case INDEX_op_bswap32_i64:
    case INDEX_op_bswap64_i64:
    case INDEX_op_neg_i32:
    case INDEX_op_neg_i64:
    case INDEX_op_ext8s_i32:
    case INDEX_op_ext8s_i64:
    case INDEX_op_ext8u_i32:
    case INDEX_op_ext8u_i64:
    case INDEX_op_ext16s_i32:
    case INDEX_op_ext16s_i64:
    case INDEX_op_ext16u_i32:
    case INDEX_op_ext16u_i64:
    case INDEX_op_ext32s_i64:
    case INDEX_op_ext32u_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
    case INDEX_op_extract_i32:
    case INDEX_op_extract_i64:
        return C_O1_I1(r, r);

    case INDEX_op_qemu_ld_i32:
    case INDEX_op_qemu_ld_i64:
        return C_O1_I1(r, L);
    case INDEX_op_qemu_st_i64:
    case INDEX_op_qemu_st_i32:
        return C_O0_I2(L, L);

    case INDEX_op_deposit_i32:
    case INDEX_op_deposit_i64:
        return C_O1_I2(r, rZ, r);

    case INDEX_op_movcond_i32:
    case INDEX_op_movcond_i64:
        return (HAVE_FACILITY(LOAD_ON_COND2)
                ? C_O1_I4(r, r, ri, rI, 0)
                : C_O1_I4(r, r, ri, r, 0));

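    /*
     * The divide instructions operate on an even/odd register pair,
     * pinned here to R2:R3 with the 'b' and 'a' constraints.
     */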
    case INDEX_op_div2_i32:
    case INDEX_op_div2_i64:
    case INDEX_op_divu2_i32:
    case INDEX_op_divu2_i64:
        return C_O2_I3(b, a, 0, 1, r);

    case INDEX_op_mulu2_i64:
        return C_O2_I2(b, a, 0, r);

    case INDEX_op_add2_i32:
    case INDEX_op_sub2_i32:
        return (HAVE_FACILITY(EXT_IMM)
                ? C_O2_I4(r, r, 0, 1, ri, r)
                : C_O2_I4(r, r, 0, 1, r, r));

    case INDEX_op_add2_i64:
    case INDEX_op_sub2_i64:
        return (HAVE_FACILITY(EXT_IMM)
                ? C_O2_I4(r, r, 0, 1, rA, r)
                : C_O2_I4(r, r, 0, 1, r, r));

    case INDEX_op_st_vec:
        return C_O0_I2(v, r);
    case INDEX_op_ld_vec:
    case INDEX_op_dupm_vec:
        return C_O1_I1(v, r);
    case INDEX_op_dup_vec:
        return C_O1_I1(v, vr);
    case INDEX_op_abs_vec:
    case INDEX_op_neg_vec:
    case INDEX_op_not_vec:
    case INDEX_op_rotli_vec:
    case INDEX_op_sari_vec:
    case INDEX_op_shli_vec:
    case INDEX_op_shri_vec:
    case INDEX_op_s390_vuph_vec:
    case INDEX_op_s390_vupl_vec:
        return C_O1_I1(v, v);
    case INDEX_op_add_vec:
    case INDEX_op_sub_vec:
    case INDEX_op_and_vec:
    case INDEX_op_andc_vec:
    case INDEX_op_or_vec:
    case INDEX_op_orc_vec:
    case INDEX_op_xor_vec:
    case INDEX_op_nand_vec:
    case INDEX_op_nor_vec:
    case INDEX_op_eqv_vec:
    case INDEX_op_cmp_vec:
    case INDEX_op_mul_vec:
    case INDEX_op_rotlv_vec:
    case INDEX_op_rotrv_vec:
    case INDEX_op_shlv_vec:
    case INDEX_op_shrv_vec:
    case INDEX_op_sarv_vec:
    case INDEX_op_smax_vec:
    case INDEX_op_smin_vec:
    case INDEX_op_umax_vec:
    case INDEX_op_umin_vec:
    case INDEX_op_s390_vpks_vec:
        return C_O1_I2(v, v, v);
    case INDEX_op_rotls_vec:
    case INDEX_op_shls_vec:
    case INDEX_op_shrs_vec:
    case INDEX_op_sars_vec:
        return C_O1_I2(v, v, r);
    case INDEX_op_bitsel_vec:
        return C_O1_I3(v, v, v, v);

    default:
        g_assert_not_reached();
    }
}

/*
 * Mainline glibc added HWCAP_S390_VX before it was kernel abi.
 * Some distros have fixed this up locally, others have not.
 */
#ifndef HWCAP_S390_VXRS
#define HWCAP_S390_VXRS 2048
#endif

static void query_s390_facilities(void)
{
    unsigned long hwcap = qemu_getauxval(AT_HWCAP);

    /* Is STORE FACILITY LIST EXTENDED available?  Honestly, I believe this
       is present on all 64-bit systems, but let's check for it anyway. */
    if (hwcap & HWCAP_S390_STFLE) {
        register int r0 __asm__("0") = ARRAY_SIZE(s390_facilities) - 1;
        register void *r1 __asm__("1") = s390_facilities;

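        /*
         * STFLE takes the number of doublewords minus one in %r0 and
         * stores the facility list at 0(%r1); the raw .word encoding
         * presumably avoids depending on assembler support for the
         * mnemonic.
         */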
        /* stfle 0(%r1) */
        asm volatile(".word 0xb2b0,0x1000"
                     : "=r"(r0) : "r"(r0), "r"(r1) : "memory", "cc");
    }

    /*
     * Use of vector registers requires os support beyond the facility bit.
     * If the kernel does not advertise support, disable the facility bits.
     * There is nothing else we currently care about in the 3rd word, so
     * disable VECTOR with one store.
     */
    if (!(hwcap & HWCAP_S390_VXRS)) {
        s390_facilities[2] = 0;
    }
}

static void tcg_target_init(TCGContext *s)
{
    query_s390_facilities();

    tcg_target_available_regs[TCG_TYPE_I32] = 0xffff;
    tcg_target_available_regs[TCG_TYPE_I64] = 0xffff;
    if (HAVE_FACILITY(VECTOR)) {
        tcg_target_available_regs[TCG_TYPE_V64] = 0xffffffff00000000ull;
        tcg_target_available_regs[TCG_TYPE_V128] = 0xffffffff00000000ull;
    }

    tcg_target_call_clobber_regs = 0;
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R0);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R1);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R2);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R3);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R4);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R5);
    /* The r6 register is technically call-saved, but it's also a parameter
       register, so it can get killed by setup for the qemu_st helper. */
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R6);
    /* The return register can be considered call-clobbered. */
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R14);

    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V0);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V1);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V2);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V3);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V4);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V5);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V6);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V7);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V16);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V17);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V18);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V19);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V20);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V21);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V22);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V23);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V24);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V25);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V26);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V27);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V28);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V29);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V30);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V31);

    s->reserved_regs = 0;
    tcg_regset_set_reg(s->reserved_regs, TCG_TMP0);
    /* XXX many insns can't be used with R0, so we better avoid it for now */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
    if (USE_REG_TB) {
        tcg_regset_set_reg(s->reserved_regs, TCG_REG_TB);
    }
}

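/*
 * The frame comprises the ABI register save/parameter area
 * (TCG_TARGET_CALL_STACK_OFFSET), space for outgoing helper-call
 * arguments, and the TCG temporary buffer.
 */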
#define FRAME_SIZE  ((int)(TCG_TARGET_CALL_STACK_OFFSET          \
                           + TCG_STATIC_CALL_ARGS_SIZE           \
                           + CPU_TEMP_BUF_NLONGS * sizeof(long)))

static void tcg_target_qemu_prologue(TCGContext *s)
{
    /* stmg %r6,%r15,48(%r15) (save registers) */
    tcg_out_insn(s, RXY, STMG, TCG_REG_R6, TCG_REG_R15, TCG_REG_R15, 48);

    /* aghi %r15,-frame_size */
    tcg_out_insn(s, RI, AGHI, TCG_REG_R15, -FRAME_SIZE);

    tcg_set_frame(s, TCG_REG_CALL_STACK,
                  TCG_STATIC_CALL_ARGS_SIZE + TCG_TARGET_CALL_STACK_OFFSET,
                  CPU_TEMP_BUF_NLONGS * sizeof(long));

#ifndef CONFIG_SOFTMMU
    if (guest_base >= 0x80000) {
        tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base, true);
        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
    }
#endif

    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
    if (USE_REG_TB) {
        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_TB,
                    tcg_target_call_iarg_regs[1]);
    }

    /* br %r3 (go to TB) */
    tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, tcg_target_call_iarg_regs[1]);

    /*
     * Return path for goto_ptr. Set return value to 0, a-la exit_tb,
     * and fall through to the rest of the epilogue.
     */
    tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R2, 0);

    /* TB epilogue */
    tb_ret_addr = tcg_splitwx_to_rx(s->code_ptr);

    /* lmg %r6,%r15,fs+48(%r15) (restore registers) */
    tcg_out_insn(s, RXY, LMG, TCG_REG_R6, TCG_REG_R15, TCG_REG_R15,
                 FRAME_SIZE + 48);

    /* br %r14 (return) */
    tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, TCG_REG_R14);
}

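/* Each 0x0707 halfword decodes as "bcr 0,%r7", i.e. a nop. */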
static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
{
    memset(p, 0x07, count * sizeof(tcg_insn_unit));
}

typedef struct {
    DebugFrameHeader h;
    uint8_t fde_def_cfa[4];
    uint8_t fde_reg_ofs[18];
} DebugFrame;

/* We're expecting a 2 byte uleb128 encoded value. */
QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14));

#define ELF_HOST_MACHINE EM_S390

static const DebugFrame debug_frame = {
    .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
    .h.cie.id = -1,
    .h.cie.version = 1,
    .h.cie.code_align = 1,
    .h.cie.data_align = 8,                /* sleb128 8 */
    .h.cie.return_column = TCG_REG_R14,

    /* Total FDE size does not include the "len" member. */
    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),

    .fde_def_cfa = {
        12, TCG_REG_CALL_STACK,         /* DW_CFA_def_cfa %r15, ... */
        (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */
        (FRAME_SIZE >> 7)
    },
    .fde_reg_ofs = {
        0x86, 6,                        /* DW_CFA_offset, %r6, 48 */
        0x87, 7,                        /* DW_CFA_offset, %r7, 56 */
        0x88, 8,                        /* DW_CFA_offset, %r8, 64 */
        0x89, 9,                        /* DW_CFA_offset, %r9, 72 */
        0x8a, 10,                       /* DW_CFA_offset, %r10, 80 */
        0x8b, 11,                       /* DW_CFA_offset, %r11, 88 */
        0x8c, 12,                       /* DW_CFA_offset, %r12, 96 */
        0x8d, 13,                       /* DW_CFA_offset, %r13, 104 */
        0x8e, 14,                       /* DW_CFA_offset, %r14, 112 */
    }
};

void tcg_register_jit(const void *buf, size_t buf_size)
{
    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}