From cfee2139a01ff11e21f2e952a6e8f4dcdb3a4719 Mon Sep 17 00:00:00 2001 From: Eric Poole Date: Fri, 29 Apr 2022 17:11:34 -0400 Subject: [PATCH] TriCore Support (#1568) * TriCore Support python sample * Update sample_tricore.py Correct attribution * Update sample_tricore.py Fixed byte code to execute properly. * Update sample_tricore.py Removed testing artifact * Added tricore msvc config-file.h * Added STATIC to tricore config and added helper methods to symbol file generation. * Update op_helper.c Use built in crc32 * Fix tricore samples and small code blocks are now handled properly * Add CPU types * Generate bindings * Format code Co-authored-by: lazymio --- .gitignore | 2 - CMakeLists.txt | 56 +- bindings/const_generator.py | 8 +- .../dotnet/UnicornManaged/Const/Common.fs | 3 +- .../dotnet/UnicornManaged/Const/TriCore.fs | 132 + bindings/go/unicorn/tricore_const.go | 127 + bindings/go/unicorn/unicorn_const.go | 3 +- bindings/java/unicorn/TriCoreConst.java | 130 + bindings/java/unicorn/UnicornConst.java | 3 +- bindings/pascal/unicorn/TriCoreConst.pas | 132 + bindings/pascal/unicorn/UnicornConst.pas | 3 +- bindings/python/sample_tricore.py | 57 + bindings/python/unicorn/tricore_const.py | 124 + bindings/python/unicorn/unicorn_const.py | 3 +- .../lib/unicorn_engine/tricore_const.rb | 127 + .../lib/unicorn_engine/unicorn_const.rb | 3 +- include/uc_priv.h | 1 + include/unicorn/tricore.h | 174 + include/unicorn/unicorn.h | 2 + msvc/tricore-softmmu/config-target.h | 5 + qemu/configure | 11 +- qemu/include/tcg/tcg.h | 6 + qemu/target/tricore/cpu-param.h | 17 + qemu/target/tricore/cpu-qom.h | 43 + qemu/target/tricore/cpu.c | 205 + qemu/target/tricore/cpu.h | 410 + qemu/target/tricore/csfr.def | 125 + qemu/target/tricore/fpu_helper.c | 478 + qemu/target/tricore/helper.c | 162 + qemu/target/tricore/helper.h | 163 + qemu/target/tricore/op_helper.c | 2795 +++++ qemu/target/tricore/translate.c | 9374 +++++++++++++++++ qemu/target/tricore/tricore-defs.h | 23 + 
qemu/target/tricore/tricore-opcodes.h | 1474 +++ qemu/target/tricore/unicorn.c | 270 + qemu/target/tricore/unicorn.h | 27 + qemu/tricore.h | 1289 +++ samples/Makefile | 3 + samples/sample_tricore.c | 100 + symbols.sh | 14 +- tests/unit/test_tricore.c | 6 + uc.c | 25 + 42 files changed, 18103 insertions(+), 12 deletions(-) create mode 100644 bindings/dotnet/UnicornManaged/Const/TriCore.fs create mode 100644 bindings/go/unicorn/tricore_const.go create mode 100644 bindings/java/unicorn/TriCoreConst.java create mode 100644 bindings/pascal/unicorn/TriCoreConst.pas create mode 100755 bindings/python/sample_tricore.py create mode 100644 bindings/python/unicorn/tricore_const.py create mode 100644 bindings/ruby/unicorn_gem/lib/unicorn_engine/tricore_const.rb create mode 100644 include/unicorn/tricore.h create mode 100644 msvc/tricore-softmmu/config-target.h create mode 100644 qemu/target/tricore/cpu-param.h create mode 100644 qemu/target/tricore/cpu-qom.h create mode 100644 qemu/target/tricore/cpu.c create mode 100644 qemu/target/tricore/cpu.h create mode 100644 qemu/target/tricore/csfr.def create mode 100644 qemu/target/tricore/fpu_helper.c create mode 100644 qemu/target/tricore/helper.c create mode 100644 qemu/target/tricore/helper.h create mode 100644 qemu/target/tricore/op_helper.c create mode 100644 qemu/target/tricore/translate.c create mode 100644 qemu/target/tricore/tricore-defs.h create mode 100644 qemu/target/tricore/tricore-opcodes.h create mode 100644 qemu/target/tricore/unicorn.c create mode 100644 qemu/target/tricore/unicorn.h create mode 100644 qemu/tricore.h create mode 100644 samples/sample_tricore.c create mode 100644 tests/unit/test_tricore.c diff --git a/.gitignore b/.gitignore index 763ff742..fad42502 100644 --- a/.gitignore +++ b/.gitignore @@ -12,8 +12,6 @@ *.jar *~ -qemu/*-softmmu/ - tags qemu/config-host.ld qemu/config.log diff --git a/CMakeLists.txt b/CMakeLists.txt index 7d1f1df3..3710e80e 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -44,7 
+44,7 @@ option(BUILD_SHARED_LIBS "Build shared instead of static library" ${PROJECT_IS_T option(UNICORN_FUZZ "Enable fuzzing" OFF) option(UNICORN_BUILD_TESTS "Build unicorn tests" ${PROJECT_IS_TOP_LEVEL}) option(UNICORN_INSTALL "Enable unicorn installation" ${PROJECT_IS_TOP_LEVEL}) -set(UNICORN_ARCH "x86;arm;aarch64;riscv;mips;sparc;m68k;ppc;s390x" CACHE STRING "Enabled unicorn architectures") +set(UNICORN_ARCH "x86;arm;aarch64;riscv;mips;sparc;m68k;ppc;s390x;tricore" CACHE STRING "Enabled unicorn architectures") option(UNICORN_TRACER "Trace unicorn execution" OFF) foreach(ARCH_LOOP ${UNICORN_ARCH}) @@ -209,6 +209,11 @@ else() set(UNICORN_TARGET_ARCH "s390") break() endif() + string(FIND ${UC_COMPILER_MACRO} "__tricore__" UC_RET) + if (${UC_RET} GREATER_EQUAL "0") + set(UNICORN_TARGET_ARCH "tricore") + break() + endif() message(FATAL_ERROR "Unknown host compiler: ${CMAKE_C_COMPILER}.") endwhile(TRUE) endif() @@ -241,6 +246,9 @@ else() if (UNICORN_HAS_S390X) set (EXTRA_CFLAGS "${EXTRA_CFLAGS}-DUNICORN_HAS_S390X ") endif() + if (UNICORN_HAS_TRICORE) + set (EXTRA_CFLAGS "${EXTRA_CFLAGS}-DUNICORN_HAS_TRICORE ") + endif() set(EXTRA_CFLAGS "${EXTRA_CFLAGS}-fPIC") if(ANDROID_ABI) @@ -282,6 +290,9 @@ else() if(UNICORN_HAS_S390X) set(TARGET_LIST "${TARGET_LIST}s390x-softmmu, ") endif() + if (UNICORN_HAS_TRICORE) + set (TARGET_LIST "${TARGET_LIST}tricore-softmmu, ") + endif() set(TARGET_LIST "${TARGET_LIST} ") # GEN config-host.mak & target directories @@ -373,6 +384,12 @@ else() OUTPUT_FILE ${CMAKE_BINARY_DIR}/s390x-softmmu/config-target.h ) endif() + if (UNICORN_HAS_TRICORE) + execute_process(COMMAND sh ${CMAKE_CURRENT_SOURCE_DIR}/qemu/scripts/create_config + INPUT_FILE ${CMAKE_BINARY_DIR}/tricore-softmmu/config-target.mak + OUTPUT_FILE ${CMAKE_BINARY_DIR}/tricore-softmmu/config-target.h + ) + endif() add_compile_options( ${UNICORN_CFLAGS} -I${CMAKE_CURRENT_SOURCE_DIR}/qemu/tcg/${UNICORN_TARGET_ARCH} @@ -1034,6 +1051,36 @@ endif() endif() +if (UNICORN_HAS_TRICORE) 
+add_library(tricore-softmmu STATIC + ${UNICORN_ARCH_COMMON} + + qemu/target/tricore/cpu.c + qemu/target/tricore/fpu_helper.c + qemu/target/tricore/helper.c + qemu/target/tricore/op_helper.c + qemu/target/tricore/translate.c + qemu/target/tricore/unicorn.c +) + +if(MSVC) + target_compile_options(tricore-softmmu PRIVATE + -DNEED_CPU_H + /FItricore.h + /I${CMAKE_CURRENT_SOURCE_DIR}/msvc/tricore-softmmu + /I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target/tricore + ) +else() + target_compile_options(tricore-softmmu PRIVATE + -DNEED_CPU_H + -include tricore.h + -I${CMAKE_BINARY_DIR}/tricore-softmmu + -I${CMAKE_CURRENT_SOURCE_DIR}/qemu/target/tricore + ) +endif() +endif() + + set(UNICORN_SRCS uc.c @@ -1194,6 +1241,13 @@ if (UNICORN_HAS_S390X) target_link_libraries(s390x-softmmu PRIVATE unicorn-common) set(UNICORN_TEST_FILE ${UNICORN_TEST_FILE} test_s390x) endif() +if (UNICORN_HAS_TRICORE) + set(UNICORN_COMPILE_OPTIONS ${UNICORN_COMPILE_OPTIONS} -DUNICORN_HAS_TRICORE) + set(UNICORN_LINK_LIBRARIES ${UNICORN_LINK_LIBRARIES} tricore-softmmu) + set(UNICORN_SAMPLE_FILE ${UNICORN_SAMPLE_FILE} sample_tricore) + target_link_libraries(tricore-softmmu unicorn-common) + set(UNICORN_TEST_FILE ${UNICORN_TEST_FILE} test_tricore) +endif() # Extra tests set(UNICORN_TEST_FILE ${UNICORN_TEST_FILE} test_mem) diff --git a/bindings/const_generator.py b/bindings/const_generator.py index 7052b2bf..2a74c864 100644 --- a/bindings/const_generator.py +++ b/bindings/const_generator.py @@ -6,7 +6,7 @@ import sys, re, os INCL_DIR = os.path.join('..', 'include', 'unicorn') -include = [ 'arm.h', 'arm64.h', 'mips.h', 'x86.h', 'sparc.h', 'm68k.h', 'ppc.h', 'riscv.h', 's390x.h', 'unicorn.h' ] +include = [ 'arm.h', 'arm64.h', 'mips.h', 'x86.h', 'sparc.h', 'm68k.h', 'ppc.h', 'riscv.h', 's390x.h', 'tricore.h', 'unicorn.h' ] template = { 'python': { @@ -24,6 +24,7 @@ template = { 'ppc.h': 'ppc', 'riscv.h': 'riscv', 's390x.h' : 's390x', + 'tricore.h' : 'tricore', 'unicorn.h': 'unicorn', 'comment_open': '#', 
'comment_close': '', @@ -43,6 +44,7 @@ template = { 'ppc.h': 'ppc', 'riscv.h': 'riscv', 's390x.h' : 's390x', + 'tricore.h' : 'tricore', 'unicorn.h': 'unicorn', 'comment_open': '#', 'comment_close': '', @@ -62,6 +64,7 @@ template = { 'ppc.h': 'ppc', 'riscv.h': 'riscv', 's390x.h' : 's390x', + 'tricore.h' : 'tricore', 'unicorn.h': 'unicorn', 'comment_open': '//', 'comment_close': '', @@ -81,6 +84,7 @@ template = { 'ppc.h': 'Ppc', 'riscv.h': 'Riscv', 's390x.h' : 'S390x', + 'tricore.h' : 'TriCore', 'unicorn.h': 'Unicorn', 'comment_open': '//', 'comment_close': '', @@ -100,6 +104,7 @@ template = { 'ppc.h': 'Ppc', 'riscv.h': 'Riscv', 's390x.h' : 'S390x', + 'tricore.h' : 'TriCore', 'unicorn.h': 'Common', 'comment_open': ' //', 'comment_close': '', @@ -119,6 +124,7 @@ template = { 'ppc.h': 'Ppc', 'riscv.h': 'Riscv', 's390x.h' : 'S390x', + 'tricore.h' : 'TriCore', 'unicorn.h': 'Unicorn', 'comment_open': '//', 'comment_close': '', diff --git a/bindings/dotnet/UnicornManaged/Const/Common.fs b/bindings/dotnet/UnicornManaged/Const/Common.fs index 20ac500c..e233c479 100644 --- a/bindings/dotnet/UnicornManaged/Const/Common.fs +++ b/bindings/dotnet/UnicornManaged/Const/Common.fs @@ -29,7 +29,8 @@ module Common = let UC_ARCH_M68K = 7 let UC_ARCH_RISCV = 8 let UC_ARCH_S390X = 9 - let UC_ARCH_MAX = 10 + let UC_ARCH_TRICORE = 10 + let UC_ARCH_MAX = 11 let UC_MODE_LITTLE_ENDIAN = 0 let UC_MODE_BIG_ENDIAN = 1073741824 diff --git a/bindings/dotnet/UnicornManaged/Const/TriCore.fs b/bindings/dotnet/UnicornManaged/Const/TriCore.fs new file mode 100644 index 00000000..0c82fb97 --- /dev/null +++ b/bindings/dotnet/UnicornManaged/Const/TriCore.fs @@ -0,0 +1,132 @@ +// For Unicorn Engine. 
AUTO-GENERATED FILE, DO NOT EDIT + +namespace UnicornManaged.Const + +open System + +[] +module TriCore = + + // TRICORE CPU + + let UC_CPU_TRICORE_TC1796 = 0 + let UC_CPU_TRICORE_TC1797 = 1 + let UC_CPU_TRICORE_TC27X = 2 + let UC_CPU_TRICORE_ENDING = 3 + + // TRICORE registers + + let UC_TRICORE_REG_INVALID = 0 + let UC_TRICORE_REG_A0 = 1 + let UC_TRICORE_REG_A1 = 2 + let UC_TRICORE_REG_A2 = 3 + let UC_TRICORE_REG_A3 = 4 + let UC_TRICORE_REG_A4 = 5 + let UC_TRICORE_REG_A5 = 6 + let UC_TRICORE_REG_A6 = 7 + let UC_TRICORE_REG_A7 = 8 + let UC_TRICORE_REG_A8 = 9 + let UC_TRICORE_REG_A9 = 10 + let UC_TRICORE_REG_A10 = 11 + let UC_TRICORE_REG_A11 = 12 + let UC_TRICORE_REG_A12 = 13 + let UC_TRICORE_REG_A13 = 14 + let UC_TRICORE_REG_A14 = 15 + let UC_TRICORE_REG_A15 = 16 + let UC_TRICORE_REG_D0 = 17 + let UC_TRICORE_REG_D1 = 18 + let UC_TRICORE_REG_D2 = 19 + let UC_TRICORE_REG_D3 = 20 + let UC_TRICORE_REG_D4 = 21 + let UC_TRICORE_REG_D5 = 22 + let UC_TRICORE_REG_D6 = 23 + let UC_TRICORE_REG_D7 = 24 + let UC_TRICORE_REG_D8 = 25 + let UC_TRICORE_REG_D9 = 26 + let UC_TRICORE_REG_D10 = 27 + let UC_TRICORE_REG_D11 = 28 + let UC_TRICORE_REG_D12 = 29 + let UC_TRICORE_REG_D13 = 30 + let UC_TRICORE_REG_D14 = 31 + let UC_TRICORE_REG_D15 = 32 + let UC_TRICORE_REG_PCXI = 33 + let UC_TRICORE_REG_PSW = 34 + let UC_TRICORE_REG_PSW_USB_C = 35 + let UC_TRICORE_REG_PSW_USB_V = 36 + let UC_TRICORE_REG_PSW_USB_SV = 37 + let UC_TRICORE_REG_PSW_USB_AV = 38 + let UC_TRICORE_REG_PSW_USB_SAV = 39 + let UC_TRICORE_REG_PC = 40 + let UC_TRICORE_REG_SYSCON = 41 + let UC_TRICORE_REG_CPU_ID = 42 + let UC_TRICORE_REG_BIV = 43 + let UC_TRICORE_REG_BTV = 44 + let UC_TRICORE_REG_ISP = 45 + let UC_TRICORE_REG_ICR = 46 + let UC_TRICORE_REG_FCX = 47 + let UC_TRICORE_REG_LCX = 48 + let UC_TRICORE_REG_COMPAT = 49 + let UC_TRICORE_REG_DPR0_U = 50 + let UC_TRICORE_REG_DPR1_U = 51 + let UC_TRICORE_REG_DPR2_U = 52 + let UC_TRICORE_REG_DPR3_U = 53 + let UC_TRICORE_REG_DPR0_L = 54 + let UC_TRICORE_REG_DPR1_L = 55 + 
let UC_TRICORE_REG_DPR2_L = 56 + let UC_TRICORE_REG_DPR3_L = 57 + let UC_TRICORE_REG_CPR0_U = 58 + let UC_TRICORE_REG_CPR1_U = 59 + let UC_TRICORE_REG_CPR2_U = 60 + let UC_TRICORE_REG_CPR3_U = 61 + let UC_TRICORE_REG_CPR0_L = 62 + let UC_TRICORE_REG_CPR1_L = 63 + let UC_TRICORE_REG_CPR2_L = 64 + let UC_TRICORE_REG_CPR3_L = 65 + let UC_TRICORE_REG_DPM0 = 66 + let UC_TRICORE_REG_DPM1 = 67 + let UC_TRICORE_REG_DPM2 = 68 + let UC_TRICORE_REG_DPM3 = 69 + let UC_TRICORE_REG_CPM0 = 70 + let UC_TRICORE_REG_CPM1 = 71 + let UC_TRICORE_REG_CPM2 = 72 + let UC_TRICORE_REG_CPM3 = 73 + let UC_TRICORE_REG_MMU_CON = 74 + let UC_TRICORE_REG_MMU_ASI = 75 + let UC_TRICORE_REG_MMU_TVA = 76 + let UC_TRICORE_REG_MMU_TPA = 77 + let UC_TRICORE_REG_MMU_TPX = 78 + let UC_TRICORE_REG_MMU_TFA = 79 + let UC_TRICORE_REG_BMACON = 80 + let UC_TRICORE_REG_SMACON = 81 + let UC_TRICORE_REG_DIEAR = 82 + let UC_TRICORE_REG_DIETR = 83 + let UC_TRICORE_REG_CCDIER = 84 + let UC_TRICORE_REG_MIECON = 85 + let UC_TRICORE_REG_PIEAR = 86 + let UC_TRICORE_REG_PIETR = 87 + let UC_TRICORE_REG_CCPIER = 88 + let UC_TRICORE_REG_DBGSR = 89 + let UC_TRICORE_REG_EXEVT = 90 + let UC_TRICORE_REG_CREVT = 91 + let UC_TRICORE_REG_SWEVT = 92 + let UC_TRICORE_REG_TR0EVT = 93 + let UC_TRICORE_REG_TR1EVT = 94 + let UC_TRICORE_REG_DMS = 95 + let UC_TRICORE_REG_DCX = 96 + let UC_TRICORE_REG_DBGTCR = 97 + let UC_TRICORE_REG_CCTRL = 98 + let UC_TRICORE_REG_CCNT = 99 + let UC_TRICORE_REG_ICNT = 100 + let UC_TRICORE_REG_M1CNT = 101 + let UC_TRICORE_REG_M2CNT = 102 + let UC_TRICORE_REG_M3CNT = 103 + let UC_TRICORE_REG_ENDING = 104 + let UC_TRICORE_REG_GA0 = 1 + let UC_TRICORE_REG_GA1 = 2 + let UC_TRICORE_REG_GA8 = 9 + let UC_TRICORE_REG_GA9 = 10 + let UC_TRICORE_REG_SP = 11 + let UC_TRICORE_REG_LR = 12 + let UC_TRICORE_REG_IA = 16 + let UC_TRICORE_REG_ID = 32 + diff --git a/bindings/go/unicorn/tricore_const.go b/bindings/go/unicorn/tricore_const.go new file mode 100644 index 00000000..41fcb7f8 --- /dev/null +++ 
b/bindings/go/unicorn/tricore_const.go @@ -0,0 +1,127 @@ +package unicorn +// For Unicorn Engine. AUTO-GENERATED FILE, DO NOT EDIT [tricore_const.go] +const ( + +// TRICORE CPU + + CPU_TRICORE_TC1796 = 0 + CPU_TRICORE_TC1797 = 1 + CPU_TRICORE_TC27X = 2 + CPU_TRICORE_ENDING = 3 + +// TRICORE registers + + TRICORE_REG_INVALID = 0 + TRICORE_REG_A0 = 1 + TRICORE_REG_A1 = 2 + TRICORE_REG_A2 = 3 + TRICORE_REG_A3 = 4 + TRICORE_REG_A4 = 5 + TRICORE_REG_A5 = 6 + TRICORE_REG_A6 = 7 + TRICORE_REG_A7 = 8 + TRICORE_REG_A8 = 9 + TRICORE_REG_A9 = 10 + TRICORE_REG_A10 = 11 + TRICORE_REG_A11 = 12 + TRICORE_REG_A12 = 13 + TRICORE_REG_A13 = 14 + TRICORE_REG_A14 = 15 + TRICORE_REG_A15 = 16 + TRICORE_REG_D0 = 17 + TRICORE_REG_D1 = 18 + TRICORE_REG_D2 = 19 + TRICORE_REG_D3 = 20 + TRICORE_REG_D4 = 21 + TRICORE_REG_D5 = 22 + TRICORE_REG_D6 = 23 + TRICORE_REG_D7 = 24 + TRICORE_REG_D8 = 25 + TRICORE_REG_D9 = 26 + TRICORE_REG_D10 = 27 + TRICORE_REG_D11 = 28 + TRICORE_REG_D12 = 29 + TRICORE_REG_D13 = 30 + TRICORE_REG_D14 = 31 + TRICORE_REG_D15 = 32 + TRICORE_REG_PCXI = 33 + TRICORE_REG_PSW = 34 + TRICORE_REG_PSW_USB_C = 35 + TRICORE_REG_PSW_USB_V = 36 + TRICORE_REG_PSW_USB_SV = 37 + TRICORE_REG_PSW_USB_AV = 38 + TRICORE_REG_PSW_USB_SAV = 39 + TRICORE_REG_PC = 40 + TRICORE_REG_SYSCON = 41 + TRICORE_REG_CPU_ID = 42 + TRICORE_REG_BIV = 43 + TRICORE_REG_BTV = 44 + TRICORE_REG_ISP = 45 + TRICORE_REG_ICR = 46 + TRICORE_REG_FCX = 47 + TRICORE_REG_LCX = 48 + TRICORE_REG_COMPAT = 49 + TRICORE_REG_DPR0_U = 50 + TRICORE_REG_DPR1_U = 51 + TRICORE_REG_DPR2_U = 52 + TRICORE_REG_DPR3_U = 53 + TRICORE_REG_DPR0_L = 54 + TRICORE_REG_DPR1_L = 55 + TRICORE_REG_DPR2_L = 56 + TRICORE_REG_DPR3_L = 57 + TRICORE_REG_CPR0_U = 58 + TRICORE_REG_CPR1_U = 59 + TRICORE_REG_CPR2_U = 60 + TRICORE_REG_CPR3_U = 61 + TRICORE_REG_CPR0_L = 62 + TRICORE_REG_CPR1_L = 63 + TRICORE_REG_CPR2_L = 64 + TRICORE_REG_CPR3_L = 65 + TRICORE_REG_DPM0 = 66 + TRICORE_REG_DPM1 = 67 + TRICORE_REG_DPM2 = 68 + TRICORE_REG_DPM3 = 69 + 
TRICORE_REG_CPM0 = 70 + TRICORE_REG_CPM1 = 71 + TRICORE_REG_CPM2 = 72 + TRICORE_REG_CPM3 = 73 + TRICORE_REG_MMU_CON = 74 + TRICORE_REG_MMU_ASI = 75 + TRICORE_REG_MMU_TVA = 76 + TRICORE_REG_MMU_TPA = 77 + TRICORE_REG_MMU_TPX = 78 + TRICORE_REG_MMU_TFA = 79 + TRICORE_REG_BMACON = 80 + TRICORE_REG_SMACON = 81 + TRICORE_REG_DIEAR = 82 + TRICORE_REG_DIETR = 83 + TRICORE_REG_CCDIER = 84 + TRICORE_REG_MIECON = 85 + TRICORE_REG_PIEAR = 86 + TRICORE_REG_PIETR = 87 + TRICORE_REG_CCPIER = 88 + TRICORE_REG_DBGSR = 89 + TRICORE_REG_EXEVT = 90 + TRICORE_REG_CREVT = 91 + TRICORE_REG_SWEVT = 92 + TRICORE_REG_TR0EVT = 93 + TRICORE_REG_TR1EVT = 94 + TRICORE_REG_DMS = 95 + TRICORE_REG_DCX = 96 + TRICORE_REG_DBGTCR = 97 + TRICORE_REG_CCTRL = 98 + TRICORE_REG_CCNT = 99 + TRICORE_REG_ICNT = 100 + TRICORE_REG_M1CNT = 101 + TRICORE_REG_M2CNT = 102 + TRICORE_REG_M3CNT = 103 + TRICORE_REG_ENDING = 104 + TRICORE_REG_GA0 = 1 + TRICORE_REG_GA1 = 2 + TRICORE_REG_GA8 = 9 + TRICORE_REG_GA9 = 10 + TRICORE_REG_SP = 11 + TRICORE_REG_LR = 12 + TRICORE_REG_IA = 16 + TRICORE_REG_ID = 32 +) \ No newline at end of file diff --git a/bindings/go/unicorn/unicorn_const.go b/bindings/go/unicorn/unicorn_const.go index bba025e9..03ff44bc 100644 --- a/bindings/go/unicorn/unicorn_const.go +++ b/bindings/go/unicorn/unicorn_const.go @@ -24,7 +24,8 @@ const ( ARCH_M68K = 7 ARCH_RISCV = 8 ARCH_S390X = 9 - ARCH_MAX = 10 + ARCH_TRICORE = 10 + ARCH_MAX = 11 MODE_LITTLE_ENDIAN = 0 MODE_BIG_ENDIAN = 1073741824 diff --git a/bindings/java/unicorn/TriCoreConst.java b/bindings/java/unicorn/TriCoreConst.java new file mode 100644 index 00000000..4154abad --- /dev/null +++ b/bindings/java/unicorn/TriCoreConst.java @@ -0,0 +1,130 @@ +// For Unicorn Engine. 
AUTO-GENERATED FILE, DO NOT EDIT + +package unicorn; + +public interface TriCoreConst { + +// TRICORE CPU + + public static final int UC_CPU_TRICORE_TC1796 = 0; + public static final int UC_CPU_TRICORE_TC1797 = 1; + public static final int UC_CPU_TRICORE_TC27X = 2; + public static final int UC_CPU_TRICORE_ENDING = 3; + +// TRICORE registers + + public static final int UC_TRICORE_REG_INVALID = 0; + public static final int UC_TRICORE_REG_A0 = 1; + public static final int UC_TRICORE_REG_A1 = 2; + public static final int UC_TRICORE_REG_A2 = 3; + public static final int UC_TRICORE_REG_A3 = 4; + public static final int UC_TRICORE_REG_A4 = 5; + public static final int UC_TRICORE_REG_A5 = 6; + public static final int UC_TRICORE_REG_A6 = 7; + public static final int UC_TRICORE_REG_A7 = 8; + public static final int UC_TRICORE_REG_A8 = 9; + public static final int UC_TRICORE_REG_A9 = 10; + public static final int UC_TRICORE_REG_A10 = 11; + public static final int UC_TRICORE_REG_A11 = 12; + public static final int UC_TRICORE_REG_A12 = 13; + public static final int UC_TRICORE_REG_A13 = 14; + public static final int UC_TRICORE_REG_A14 = 15; + public static final int UC_TRICORE_REG_A15 = 16; + public static final int UC_TRICORE_REG_D0 = 17; + public static final int UC_TRICORE_REG_D1 = 18; + public static final int UC_TRICORE_REG_D2 = 19; + public static final int UC_TRICORE_REG_D3 = 20; + public static final int UC_TRICORE_REG_D4 = 21; + public static final int UC_TRICORE_REG_D5 = 22; + public static final int UC_TRICORE_REG_D6 = 23; + public static final int UC_TRICORE_REG_D7 = 24; + public static final int UC_TRICORE_REG_D8 = 25; + public static final int UC_TRICORE_REG_D9 = 26; + public static final int UC_TRICORE_REG_D10 = 27; + public static final int UC_TRICORE_REG_D11 = 28; + public static final int UC_TRICORE_REG_D12 = 29; + public static final int UC_TRICORE_REG_D13 = 30; + public static final int UC_TRICORE_REG_D14 = 31; + public static final int UC_TRICORE_REG_D15 = 
32; + public static final int UC_TRICORE_REG_PCXI = 33; + public static final int UC_TRICORE_REG_PSW = 34; + public static final int UC_TRICORE_REG_PSW_USB_C = 35; + public static final int UC_TRICORE_REG_PSW_USB_V = 36; + public static final int UC_TRICORE_REG_PSW_USB_SV = 37; + public static final int UC_TRICORE_REG_PSW_USB_AV = 38; + public static final int UC_TRICORE_REG_PSW_USB_SAV = 39; + public static final int UC_TRICORE_REG_PC = 40; + public static final int UC_TRICORE_REG_SYSCON = 41; + public static final int UC_TRICORE_REG_CPU_ID = 42; + public static final int UC_TRICORE_REG_BIV = 43; + public static final int UC_TRICORE_REG_BTV = 44; + public static final int UC_TRICORE_REG_ISP = 45; + public static final int UC_TRICORE_REG_ICR = 46; + public static final int UC_TRICORE_REG_FCX = 47; + public static final int UC_TRICORE_REG_LCX = 48; + public static final int UC_TRICORE_REG_COMPAT = 49; + public static final int UC_TRICORE_REG_DPR0_U = 50; + public static final int UC_TRICORE_REG_DPR1_U = 51; + public static final int UC_TRICORE_REG_DPR2_U = 52; + public static final int UC_TRICORE_REG_DPR3_U = 53; + public static final int UC_TRICORE_REG_DPR0_L = 54; + public static final int UC_TRICORE_REG_DPR1_L = 55; + public static final int UC_TRICORE_REG_DPR2_L = 56; + public static final int UC_TRICORE_REG_DPR3_L = 57; + public static final int UC_TRICORE_REG_CPR0_U = 58; + public static final int UC_TRICORE_REG_CPR1_U = 59; + public static final int UC_TRICORE_REG_CPR2_U = 60; + public static final int UC_TRICORE_REG_CPR3_U = 61; + public static final int UC_TRICORE_REG_CPR0_L = 62; + public static final int UC_TRICORE_REG_CPR1_L = 63; + public static final int UC_TRICORE_REG_CPR2_L = 64; + public static final int UC_TRICORE_REG_CPR3_L = 65; + public static final int UC_TRICORE_REG_DPM0 = 66; + public static final int UC_TRICORE_REG_DPM1 = 67; + public static final int UC_TRICORE_REG_DPM2 = 68; + public static final int UC_TRICORE_REG_DPM3 = 69; + public 
static final int UC_TRICORE_REG_CPM0 = 70; + public static final int UC_TRICORE_REG_CPM1 = 71; + public static final int UC_TRICORE_REG_CPM2 = 72; + public static final int UC_TRICORE_REG_CPM3 = 73; + public static final int UC_TRICORE_REG_MMU_CON = 74; + public static final int UC_TRICORE_REG_MMU_ASI = 75; + public static final int UC_TRICORE_REG_MMU_TVA = 76; + public static final int UC_TRICORE_REG_MMU_TPA = 77; + public static final int UC_TRICORE_REG_MMU_TPX = 78; + public static final int UC_TRICORE_REG_MMU_TFA = 79; + public static final int UC_TRICORE_REG_BMACON = 80; + public static final int UC_TRICORE_REG_SMACON = 81; + public static final int UC_TRICORE_REG_DIEAR = 82; + public static final int UC_TRICORE_REG_DIETR = 83; + public static final int UC_TRICORE_REG_CCDIER = 84; + public static final int UC_TRICORE_REG_MIECON = 85; + public static final int UC_TRICORE_REG_PIEAR = 86; + public static final int UC_TRICORE_REG_PIETR = 87; + public static final int UC_TRICORE_REG_CCPIER = 88; + public static final int UC_TRICORE_REG_DBGSR = 89; + public static final int UC_TRICORE_REG_EXEVT = 90; + public static final int UC_TRICORE_REG_CREVT = 91; + public static final int UC_TRICORE_REG_SWEVT = 92; + public static final int UC_TRICORE_REG_TR0EVT = 93; + public static final int UC_TRICORE_REG_TR1EVT = 94; + public static final int UC_TRICORE_REG_DMS = 95; + public static final int UC_TRICORE_REG_DCX = 96; + public static final int UC_TRICORE_REG_DBGTCR = 97; + public static final int UC_TRICORE_REG_CCTRL = 98; + public static final int UC_TRICORE_REG_CCNT = 99; + public static final int UC_TRICORE_REG_ICNT = 100; + public static final int UC_TRICORE_REG_M1CNT = 101; + public static final int UC_TRICORE_REG_M2CNT = 102; + public static final int UC_TRICORE_REG_M3CNT = 103; + public static final int UC_TRICORE_REG_ENDING = 104; + public static final int UC_TRICORE_REG_GA0 = 1; + public static final int UC_TRICORE_REG_GA1 = 2; + public static final int 
UC_TRICORE_REG_GA8 = 9; + public static final int UC_TRICORE_REG_GA9 = 10; + public static final int UC_TRICORE_REG_SP = 11; + public static final int UC_TRICORE_REG_LR = 12; + public static final int UC_TRICORE_REG_IA = 16; + public static final int UC_TRICORE_REG_ID = 32; + +} diff --git a/bindings/java/unicorn/UnicornConst.java b/bindings/java/unicorn/UnicornConst.java index 27a23c41..24dd7acf 100644 --- a/bindings/java/unicorn/UnicornConst.java +++ b/bindings/java/unicorn/UnicornConst.java @@ -26,7 +26,8 @@ public interface UnicornConst { public static final int UC_ARCH_M68K = 7; public static final int UC_ARCH_RISCV = 8; public static final int UC_ARCH_S390X = 9; - public static final int UC_ARCH_MAX = 10; + public static final int UC_ARCH_TRICORE = 10; + public static final int UC_ARCH_MAX = 11; public static final int UC_MODE_LITTLE_ENDIAN = 0; public static final int UC_MODE_BIG_ENDIAN = 1073741824; diff --git a/bindings/pascal/unicorn/TriCoreConst.pas b/bindings/pascal/unicorn/TriCoreConst.pas new file mode 100644 index 00000000..5869c377 --- /dev/null +++ b/bindings/pascal/unicorn/TriCoreConst.pas @@ -0,0 +1,132 @@ +// For Unicorn Engine. 
AUTO-GENERATED FILE, DO NOT EDIT + +unit TriCoreConst; + +interface + +const +// TRICORE CPU + + UC_CPU_TRICORE_TC1796 = 0; + UC_CPU_TRICORE_TC1797 = 1; + UC_CPU_TRICORE_TC27X = 2; + UC_CPU_TRICORE_ENDING = 3; + +// TRICORE registers + + UC_TRICORE_REG_INVALID = 0; + UC_TRICORE_REG_A0 = 1; + UC_TRICORE_REG_A1 = 2; + UC_TRICORE_REG_A2 = 3; + UC_TRICORE_REG_A3 = 4; + UC_TRICORE_REG_A4 = 5; + UC_TRICORE_REG_A5 = 6; + UC_TRICORE_REG_A6 = 7; + UC_TRICORE_REG_A7 = 8; + UC_TRICORE_REG_A8 = 9; + UC_TRICORE_REG_A9 = 10; + UC_TRICORE_REG_A10 = 11; + UC_TRICORE_REG_A11 = 12; + UC_TRICORE_REG_A12 = 13; + UC_TRICORE_REG_A13 = 14; + UC_TRICORE_REG_A14 = 15; + UC_TRICORE_REG_A15 = 16; + UC_TRICORE_REG_D0 = 17; + UC_TRICORE_REG_D1 = 18; + UC_TRICORE_REG_D2 = 19; + UC_TRICORE_REG_D3 = 20; + UC_TRICORE_REG_D4 = 21; + UC_TRICORE_REG_D5 = 22; + UC_TRICORE_REG_D6 = 23; + UC_TRICORE_REG_D7 = 24; + UC_TRICORE_REG_D8 = 25; + UC_TRICORE_REG_D9 = 26; + UC_TRICORE_REG_D10 = 27; + UC_TRICORE_REG_D11 = 28; + UC_TRICORE_REG_D12 = 29; + UC_TRICORE_REG_D13 = 30; + UC_TRICORE_REG_D14 = 31; + UC_TRICORE_REG_D15 = 32; + UC_TRICORE_REG_PCXI = 33; + UC_TRICORE_REG_PSW = 34; + UC_TRICORE_REG_PSW_USB_C = 35; + UC_TRICORE_REG_PSW_USB_V = 36; + UC_TRICORE_REG_PSW_USB_SV = 37; + UC_TRICORE_REG_PSW_USB_AV = 38; + UC_TRICORE_REG_PSW_USB_SAV = 39; + UC_TRICORE_REG_PC = 40; + UC_TRICORE_REG_SYSCON = 41; + UC_TRICORE_REG_CPU_ID = 42; + UC_TRICORE_REG_BIV = 43; + UC_TRICORE_REG_BTV = 44; + UC_TRICORE_REG_ISP = 45; + UC_TRICORE_REG_ICR = 46; + UC_TRICORE_REG_FCX = 47; + UC_TRICORE_REG_LCX = 48; + UC_TRICORE_REG_COMPAT = 49; + UC_TRICORE_REG_DPR0_U = 50; + UC_TRICORE_REG_DPR1_U = 51; + UC_TRICORE_REG_DPR2_U = 52; + UC_TRICORE_REG_DPR3_U = 53; + UC_TRICORE_REG_DPR0_L = 54; + UC_TRICORE_REG_DPR1_L = 55; + UC_TRICORE_REG_DPR2_L = 56; + UC_TRICORE_REG_DPR3_L = 57; + UC_TRICORE_REG_CPR0_U = 58; + UC_TRICORE_REG_CPR1_U = 59; + UC_TRICORE_REG_CPR2_U = 60; + UC_TRICORE_REG_CPR3_U = 61; + UC_TRICORE_REG_CPR0_L = 62; + 
UC_TRICORE_REG_CPR1_L = 63; + UC_TRICORE_REG_CPR2_L = 64; + UC_TRICORE_REG_CPR3_L = 65; + UC_TRICORE_REG_DPM0 = 66; + UC_TRICORE_REG_DPM1 = 67; + UC_TRICORE_REG_DPM2 = 68; + UC_TRICORE_REG_DPM3 = 69; + UC_TRICORE_REG_CPM0 = 70; + UC_TRICORE_REG_CPM1 = 71; + UC_TRICORE_REG_CPM2 = 72; + UC_TRICORE_REG_CPM3 = 73; + UC_TRICORE_REG_MMU_CON = 74; + UC_TRICORE_REG_MMU_ASI = 75; + UC_TRICORE_REG_MMU_TVA = 76; + UC_TRICORE_REG_MMU_TPA = 77; + UC_TRICORE_REG_MMU_TPX = 78; + UC_TRICORE_REG_MMU_TFA = 79; + UC_TRICORE_REG_BMACON = 80; + UC_TRICORE_REG_SMACON = 81; + UC_TRICORE_REG_DIEAR = 82; + UC_TRICORE_REG_DIETR = 83; + UC_TRICORE_REG_CCDIER = 84; + UC_TRICORE_REG_MIECON = 85; + UC_TRICORE_REG_PIEAR = 86; + UC_TRICORE_REG_PIETR = 87; + UC_TRICORE_REG_CCPIER = 88; + UC_TRICORE_REG_DBGSR = 89; + UC_TRICORE_REG_EXEVT = 90; + UC_TRICORE_REG_CREVT = 91; + UC_TRICORE_REG_SWEVT = 92; + UC_TRICORE_REG_TR0EVT = 93; + UC_TRICORE_REG_TR1EVT = 94; + UC_TRICORE_REG_DMS = 95; + UC_TRICORE_REG_DCX = 96; + UC_TRICORE_REG_DBGTCR = 97; + UC_TRICORE_REG_CCTRL = 98; + UC_TRICORE_REG_CCNT = 99; + UC_TRICORE_REG_ICNT = 100; + UC_TRICORE_REG_M1CNT = 101; + UC_TRICORE_REG_M2CNT = 102; + UC_TRICORE_REG_M3CNT = 103; + UC_TRICORE_REG_ENDING = 104; + UC_TRICORE_REG_GA0 = 1; + UC_TRICORE_REG_GA1 = 2; + UC_TRICORE_REG_GA8 = 9; + UC_TRICORE_REG_GA9 = 10; + UC_TRICORE_REG_SP = 11; + UC_TRICORE_REG_LR = 12; + UC_TRICORE_REG_IA = 16; + UC_TRICORE_REG_ID = 32; + +implementation +end. 
\ No newline at end of file diff --git a/bindings/pascal/unicorn/UnicornConst.pas b/bindings/pascal/unicorn/UnicornConst.pas index f96cf1ed..f130dda9 100644 --- a/bindings/pascal/unicorn/UnicornConst.pas +++ b/bindings/pascal/unicorn/UnicornConst.pas @@ -27,7 +27,8 @@ const UC_API_MAJOR = 2; UC_ARCH_M68K = 7; UC_ARCH_RISCV = 8; UC_ARCH_S390X = 9; - UC_ARCH_MAX = 10; + UC_ARCH_TRICORE = 10; + UC_ARCH_MAX = 11; UC_MODE_LITTLE_ENDIAN = 0; UC_MODE_BIG_ENDIAN = 1073741824; diff --git a/bindings/python/sample_tricore.py b/bindings/python/sample_tricore.py new file mode 100755 index 00000000..2e7174e1 --- /dev/null +++ b/bindings/python/sample_tricore.py @@ -0,0 +1,57 @@ +#!/usr/bin/env python + +''' + Created for Unicorn Engine by Eric Poole , 2022 + Copyright 2022 Aptiv +''' + +from __future__ import print_function +from unicorn import * +from unicorn.tricore_const import * + +# code to be emulated +TRICORE_CODE = b"\x82\x11\xbb\x00\x00\x08" # mov d0, #0x1; mov.u d0, #0x8000 +# memory address where emulation starts +ADDRESS = 0x10000 + +# callback for tracing basic blocks +def hook_block(uc, address, size, user_data): + print(">>> Tracing basic block at 0x%x, block size = 0x%x" %(address, size)) + +# callback for tracing instructions +def hook_code(uc, address, size, user_data): + print(">>> Tracing instruction at 0x%x, instruction size = 0x%x" %(address, size)) + +# Test TriCore +def test_tricore(): + print("Emulate TriCore code") + try: + # Initialize emulator in TriCore mode + mu = Uc(UC_ARCH_TRICORE, UC_MODE_LITTLE_ENDIAN) + + # map 2MB memory for this emulation + mu.mem_map(ADDRESS, 2 * 1024 * 1024) + + # write machine code to be emulated to memory + mu.mem_write(ADDRESS, TRICORE_CODE) + + # tracing all basic blocks with customized callback + mu.hook_add(UC_HOOK_BLOCK, hook_block) + + # tracing one instruction at ADDRESS with customized callback + mu.hook_add(UC_HOOK_CODE, hook_code) + + # emulate machine code in infinite time + mu.emu_start(ADDRESS, ADDRESS + 
len(TRICORE_CODE)) + + # now print out some registers + print(">>> Emulation done. Below is the CPU context") + + r0 = mu.reg_read(UC_TRICORE_REG_D0) + print(">>> D0 = 0x%x" %r0) + + except UcError as e: + print("ERROR: %s" % e) + +if __name__ == '__main__': + test_tricore() diff --git a/bindings/python/unicorn/tricore_const.py b/bindings/python/unicorn/tricore_const.py new file mode 100644 index 00000000..2453e003 --- /dev/null +++ b/bindings/python/unicorn/tricore_const.py @@ -0,0 +1,124 @@ +# For Unicorn Engine. AUTO-GENERATED FILE, DO NOT EDIT [tricore_const.py] + +# TRICORE CPU + +UC_CPU_TRICORE_TC1796 = 0 +UC_CPU_TRICORE_TC1797 = 1 +UC_CPU_TRICORE_TC27X = 2 +UC_CPU_TRICORE_ENDING = 3 + +# TRICORE registers + +UC_TRICORE_REG_INVALID = 0 +UC_TRICORE_REG_A0 = 1 +UC_TRICORE_REG_A1 = 2 +UC_TRICORE_REG_A2 = 3 +UC_TRICORE_REG_A3 = 4 +UC_TRICORE_REG_A4 = 5 +UC_TRICORE_REG_A5 = 6 +UC_TRICORE_REG_A6 = 7 +UC_TRICORE_REG_A7 = 8 +UC_TRICORE_REG_A8 = 9 +UC_TRICORE_REG_A9 = 10 +UC_TRICORE_REG_A10 = 11 +UC_TRICORE_REG_A11 = 12 +UC_TRICORE_REG_A12 = 13 +UC_TRICORE_REG_A13 = 14 +UC_TRICORE_REG_A14 = 15 +UC_TRICORE_REG_A15 = 16 +UC_TRICORE_REG_D0 = 17 +UC_TRICORE_REG_D1 = 18 +UC_TRICORE_REG_D2 = 19 +UC_TRICORE_REG_D3 = 20 +UC_TRICORE_REG_D4 = 21 +UC_TRICORE_REG_D5 = 22 +UC_TRICORE_REG_D6 = 23 +UC_TRICORE_REG_D7 = 24 +UC_TRICORE_REG_D8 = 25 +UC_TRICORE_REG_D9 = 26 +UC_TRICORE_REG_D10 = 27 +UC_TRICORE_REG_D11 = 28 +UC_TRICORE_REG_D12 = 29 +UC_TRICORE_REG_D13 = 30 +UC_TRICORE_REG_D14 = 31 +UC_TRICORE_REG_D15 = 32 +UC_TRICORE_REG_PCXI = 33 +UC_TRICORE_REG_PSW = 34 +UC_TRICORE_REG_PSW_USB_C = 35 +UC_TRICORE_REG_PSW_USB_V = 36 +UC_TRICORE_REG_PSW_USB_SV = 37 +UC_TRICORE_REG_PSW_USB_AV = 38 +UC_TRICORE_REG_PSW_USB_SAV = 39 +UC_TRICORE_REG_PC = 40 +UC_TRICORE_REG_SYSCON = 41 +UC_TRICORE_REG_CPU_ID = 42 +UC_TRICORE_REG_BIV = 43 +UC_TRICORE_REG_BTV = 44 +UC_TRICORE_REG_ISP = 45 +UC_TRICORE_REG_ICR = 46 +UC_TRICORE_REG_FCX = 47 +UC_TRICORE_REG_LCX = 48 +UC_TRICORE_REG_COMPAT = 49 
+UC_TRICORE_REG_DPR0_U = 50 +UC_TRICORE_REG_DPR1_U = 51 +UC_TRICORE_REG_DPR2_U = 52 +UC_TRICORE_REG_DPR3_U = 53 +UC_TRICORE_REG_DPR0_L = 54 +UC_TRICORE_REG_DPR1_L = 55 +UC_TRICORE_REG_DPR2_L = 56 +UC_TRICORE_REG_DPR3_L = 57 +UC_TRICORE_REG_CPR0_U = 58 +UC_TRICORE_REG_CPR1_U = 59 +UC_TRICORE_REG_CPR2_U = 60 +UC_TRICORE_REG_CPR3_U = 61 +UC_TRICORE_REG_CPR0_L = 62 +UC_TRICORE_REG_CPR1_L = 63 +UC_TRICORE_REG_CPR2_L = 64 +UC_TRICORE_REG_CPR3_L = 65 +UC_TRICORE_REG_DPM0 = 66 +UC_TRICORE_REG_DPM1 = 67 +UC_TRICORE_REG_DPM2 = 68 +UC_TRICORE_REG_DPM3 = 69 +UC_TRICORE_REG_CPM0 = 70 +UC_TRICORE_REG_CPM1 = 71 +UC_TRICORE_REG_CPM2 = 72 +UC_TRICORE_REG_CPM3 = 73 +UC_TRICORE_REG_MMU_CON = 74 +UC_TRICORE_REG_MMU_ASI = 75 +UC_TRICORE_REG_MMU_TVA = 76 +UC_TRICORE_REG_MMU_TPA = 77 +UC_TRICORE_REG_MMU_TPX = 78 +UC_TRICORE_REG_MMU_TFA = 79 +UC_TRICORE_REG_BMACON = 80 +UC_TRICORE_REG_SMACON = 81 +UC_TRICORE_REG_DIEAR = 82 +UC_TRICORE_REG_DIETR = 83 +UC_TRICORE_REG_CCDIER = 84 +UC_TRICORE_REG_MIECON = 85 +UC_TRICORE_REG_PIEAR = 86 +UC_TRICORE_REG_PIETR = 87 +UC_TRICORE_REG_CCPIER = 88 +UC_TRICORE_REG_DBGSR = 89 +UC_TRICORE_REG_EXEVT = 90 +UC_TRICORE_REG_CREVT = 91 +UC_TRICORE_REG_SWEVT = 92 +UC_TRICORE_REG_TR0EVT = 93 +UC_TRICORE_REG_TR1EVT = 94 +UC_TRICORE_REG_DMS = 95 +UC_TRICORE_REG_DCX = 96 +UC_TRICORE_REG_DBGTCR = 97 +UC_TRICORE_REG_CCTRL = 98 +UC_TRICORE_REG_CCNT = 99 +UC_TRICORE_REG_ICNT = 100 +UC_TRICORE_REG_M1CNT = 101 +UC_TRICORE_REG_M2CNT = 102 +UC_TRICORE_REG_M3CNT = 103 +UC_TRICORE_REG_ENDING = 104 +UC_TRICORE_REG_GA0 = 1 +UC_TRICORE_REG_GA1 = 2 +UC_TRICORE_REG_GA8 = 9 +UC_TRICORE_REG_GA9 = 10 +UC_TRICORE_REG_SP = 11 +UC_TRICORE_REG_LR = 12 +UC_TRICORE_REG_IA = 16 +UC_TRICORE_REG_ID = 32 diff --git a/bindings/python/unicorn/unicorn_const.py b/bindings/python/unicorn/unicorn_const.py index 64d47cda..28528448 100644 --- a/bindings/python/unicorn/unicorn_const.py +++ b/bindings/python/unicorn/unicorn_const.py @@ -22,7 +22,8 @@ UC_ARCH_SPARC = 6 UC_ARCH_M68K = 7 UC_ARCH_RISCV = 8 
UC_ARCH_S390X = 9 -UC_ARCH_MAX = 10 +UC_ARCH_TRICORE = 10 +UC_ARCH_MAX = 11 UC_MODE_LITTLE_ENDIAN = 0 UC_MODE_BIG_ENDIAN = 1073741824 diff --git a/bindings/ruby/unicorn_gem/lib/unicorn_engine/tricore_const.rb b/bindings/ruby/unicorn_gem/lib/unicorn_engine/tricore_const.rb new file mode 100644 index 00000000..32bba0cf --- /dev/null +++ b/bindings/ruby/unicorn_gem/lib/unicorn_engine/tricore_const.rb @@ -0,0 +1,127 @@ +# For Unicorn Engine. AUTO-GENERATED FILE, DO NOT EDIT [tricore_const.rb] + +module UnicornEngine + +# TRICORE CPU + + UC_CPU_TRICORE_TC1796 = 0 + UC_CPU_TRICORE_TC1797 = 1 + UC_CPU_TRICORE_TC27X = 2 + UC_CPU_TRICORE_ENDING = 3 + +# TRICORE registers + + UC_TRICORE_REG_INVALID = 0 + UC_TRICORE_REG_A0 = 1 + UC_TRICORE_REG_A1 = 2 + UC_TRICORE_REG_A2 = 3 + UC_TRICORE_REG_A3 = 4 + UC_TRICORE_REG_A4 = 5 + UC_TRICORE_REG_A5 = 6 + UC_TRICORE_REG_A6 = 7 + UC_TRICORE_REG_A7 = 8 + UC_TRICORE_REG_A8 = 9 + UC_TRICORE_REG_A9 = 10 + UC_TRICORE_REG_A10 = 11 + UC_TRICORE_REG_A11 = 12 + UC_TRICORE_REG_A12 = 13 + UC_TRICORE_REG_A13 = 14 + UC_TRICORE_REG_A14 = 15 + UC_TRICORE_REG_A15 = 16 + UC_TRICORE_REG_D0 = 17 + UC_TRICORE_REG_D1 = 18 + UC_TRICORE_REG_D2 = 19 + UC_TRICORE_REG_D3 = 20 + UC_TRICORE_REG_D4 = 21 + UC_TRICORE_REG_D5 = 22 + UC_TRICORE_REG_D6 = 23 + UC_TRICORE_REG_D7 = 24 + UC_TRICORE_REG_D8 = 25 + UC_TRICORE_REG_D9 = 26 + UC_TRICORE_REG_D10 = 27 + UC_TRICORE_REG_D11 = 28 + UC_TRICORE_REG_D12 = 29 + UC_TRICORE_REG_D13 = 30 + UC_TRICORE_REG_D14 = 31 + UC_TRICORE_REG_D15 = 32 + UC_TRICORE_REG_PCXI = 33 + UC_TRICORE_REG_PSW = 34 + UC_TRICORE_REG_PSW_USB_C = 35 + UC_TRICORE_REG_PSW_USB_V = 36 + UC_TRICORE_REG_PSW_USB_SV = 37 + UC_TRICORE_REG_PSW_USB_AV = 38 + UC_TRICORE_REG_PSW_USB_SAV = 39 + UC_TRICORE_REG_PC = 40 + UC_TRICORE_REG_SYSCON = 41 + UC_TRICORE_REG_CPU_ID = 42 + UC_TRICORE_REG_BIV = 43 + UC_TRICORE_REG_BTV = 44 + UC_TRICORE_REG_ISP = 45 + UC_TRICORE_REG_ICR = 46 + UC_TRICORE_REG_FCX = 47 + UC_TRICORE_REG_LCX = 48 + UC_TRICORE_REG_COMPAT = 49 + 
UC_TRICORE_REG_DPR0_U = 50 + UC_TRICORE_REG_DPR1_U = 51 + UC_TRICORE_REG_DPR2_U = 52 + UC_TRICORE_REG_DPR3_U = 53 + UC_TRICORE_REG_DPR0_L = 54 + UC_TRICORE_REG_DPR1_L = 55 + UC_TRICORE_REG_DPR2_L = 56 + UC_TRICORE_REG_DPR3_L = 57 + UC_TRICORE_REG_CPR0_U = 58 + UC_TRICORE_REG_CPR1_U = 59 + UC_TRICORE_REG_CPR2_U = 60 + UC_TRICORE_REG_CPR3_U = 61 + UC_TRICORE_REG_CPR0_L = 62 + UC_TRICORE_REG_CPR1_L = 63 + UC_TRICORE_REG_CPR2_L = 64 + UC_TRICORE_REG_CPR3_L = 65 + UC_TRICORE_REG_DPM0 = 66 + UC_TRICORE_REG_DPM1 = 67 + UC_TRICORE_REG_DPM2 = 68 + UC_TRICORE_REG_DPM3 = 69 + UC_TRICORE_REG_CPM0 = 70 + UC_TRICORE_REG_CPM1 = 71 + UC_TRICORE_REG_CPM2 = 72 + UC_TRICORE_REG_CPM3 = 73 + UC_TRICORE_REG_MMU_CON = 74 + UC_TRICORE_REG_MMU_ASI = 75 + UC_TRICORE_REG_MMU_TVA = 76 + UC_TRICORE_REG_MMU_TPA = 77 + UC_TRICORE_REG_MMU_TPX = 78 + UC_TRICORE_REG_MMU_TFA = 79 + UC_TRICORE_REG_BMACON = 80 + UC_TRICORE_REG_SMACON = 81 + UC_TRICORE_REG_DIEAR = 82 + UC_TRICORE_REG_DIETR = 83 + UC_TRICORE_REG_CCDIER = 84 + UC_TRICORE_REG_MIECON = 85 + UC_TRICORE_REG_PIEAR = 86 + UC_TRICORE_REG_PIETR = 87 + UC_TRICORE_REG_CCPIER = 88 + UC_TRICORE_REG_DBGSR = 89 + UC_TRICORE_REG_EXEVT = 90 + UC_TRICORE_REG_CREVT = 91 + UC_TRICORE_REG_SWEVT = 92 + UC_TRICORE_REG_TR0EVT = 93 + UC_TRICORE_REG_TR1EVT = 94 + UC_TRICORE_REG_DMS = 95 + UC_TRICORE_REG_DCX = 96 + UC_TRICORE_REG_DBGTCR = 97 + UC_TRICORE_REG_CCTRL = 98 + UC_TRICORE_REG_CCNT = 99 + UC_TRICORE_REG_ICNT = 100 + UC_TRICORE_REG_M1CNT = 101 + UC_TRICORE_REG_M2CNT = 102 + UC_TRICORE_REG_M3CNT = 103 + UC_TRICORE_REG_ENDING = 104 + UC_TRICORE_REG_GA0 = 1 + UC_TRICORE_REG_GA1 = 2 + UC_TRICORE_REG_GA8 = 9 + UC_TRICORE_REG_GA9 = 10 + UC_TRICORE_REG_SP = 11 + UC_TRICORE_REG_LR = 12 + UC_TRICORE_REG_IA = 16 + UC_TRICORE_REG_ID = 32 +end \ No newline at end of file diff --git a/bindings/ruby/unicorn_gem/lib/unicorn_engine/unicorn_const.rb b/bindings/ruby/unicorn_gem/lib/unicorn_engine/unicorn_const.rb index bcad1aa3..d32f42da 100644 --- 
a/bindings/ruby/unicorn_gem/lib/unicorn_engine/unicorn_const.rb +++ b/bindings/ruby/unicorn_gem/lib/unicorn_engine/unicorn_const.rb @@ -24,7 +24,8 @@ module UnicornEngine UC_ARCH_M68K = 7 UC_ARCH_RISCV = 8 UC_ARCH_S390X = 9 - UC_ARCH_MAX = 10 + UC_ARCH_TRICORE = 10 + UC_ARCH_MAX = 11 UC_MODE_LITTLE_ENDIAN = 0 UC_MODE_BIG_ENDIAN = 1073741824 diff --git a/include/uc_priv.h b/include/uc_priv.h index 8a8ed903..95996080 100644 --- a/include/uc_priv.h +++ b/include/uc_priv.h @@ -33,6 +33,7 @@ #define UC_MODE_RISCV_MASK \ (UC_MODE_RISCV32 | UC_MODE_RISCV64 | UC_MODE_LITTLE_ENDIAN) #define UC_MODE_S390X_MASK (UC_MODE_BIG_ENDIAN) +#define UC_MODE_TRICORE_MASK (UC_MODE_LITTLE_ENDIAN) #define ARR_SIZE(a) (sizeof(a) / sizeof(a[0])) diff --git a/include/unicorn/tricore.h b/include/unicorn/tricore.h new file mode 100644 index 00000000..bb9aa9d1 --- /dev/null +++ b/include/unicorn/tricore.h @@ -0,0 +1,174 @@ +/* This file is released under LGPL2. + See COPYING.LGPL2 in root directory for more details +*/ + +/* + Created for Unicorn Engine by Eric Poole , 2022 + Copyright 2022 Aptiv +*/ + +#ifndef UNICORN_TRICORE_H +#define UNICORN_TRICORE_H + +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef _MSC_VER +#pragma warning(disable : 4201) +#endif + +//> TRICORE CPU +typedef enum uc_cpu_tricore { + UC_CPU_TRICORE_TC1796, + UC_CPU_TRICORE_TC1797, + UC_CPU_TRICORE_TC27X, + + UC_CPU_TRICORE_ENDING +} uc_cpu_tricore; + +//> TRICORE registers +typedef enum uc_tricore_reg { + UC_TRICORE_REG_INVALID = 0, + + // General purpose registers (GPR) + // Address GPR + UC_TRICORE_REG_A0, + UC_TRICORE_REG_A1, + UC_TRICORE_REG_A2, + UC_TRICORE_REG_A3, + UC_TRICORE_REG_A4, + UC_TRICORE_REG_A5, + UC_TRICORE_REG_A6, + UC_TRICORE_REG_A7, + UC_TRICORE_REG_A8, + UC_TRICORE_REG_A9, + UC_TRICORE_REG_A10, + UC_TRICORE_REG_A11, + UC_TRICORE_REG_A12, + UC_TRICORE_REG_A13, + UC_TRICORE_REG_A14, + UC_TRICORE_REG_A15, + // Data GPR + UC_TRICORE_REG_D0, + UC_TRICORE_REG_D1, + UC_TRICORE_REG_D2, + UC_TRICORE_REG_D3, 
+ UC_TRICORE_REG_D4, + UC_TRICORE_REG_D5, + UC_TRICORE_REG_D6, + UC_TRICORE_REG_D7, + UC_TRICORE_REG_D8, + UC_TRICORE_REG_D9, + UC_TRICORE_REG_D10, + UC_TRICORE_REG_D11, + UC_TRICORE_REG_D12, + UC_TRICORE_REG_D13, + UC_TRICORE_REG_D14, + UC_TRICORE_REG_D15, + + /* CSFR Register */ + UC_TRICORE_REG_PCXI, + + UC_TRICORE_REG_PSW, + + /* PSW flag cache for faster execution */ + UC_TRICORE_REG_PSW_USB_C, + UC_TRICORE_REG_PSW_USB_V, + UC_TRICORE_REG_PSW_USB_SV, + UC_TRICORE_REG_PSW_USB_AV, + UC_TRICORE_REG_PSW_USB_SAV, + + UC_TRICORE_REG_PC, + UC_TRICORE_REG_SYSCON, + UC_TRICORE_REG_CPU_ID, + UC_TRICORE_REG_BIV, + UC_TRICORE_REG_BTV, + UC_TRICORE_REG_ISP, + UC_TRICORE_REG_ICR, + UC_TRICORE_REG_FCX, + UC_TRICORE_REG_LCX, + UC_TRICORE_REG_COMPAT, + + UC_TRICORE_REG_DPR0_U, + UC_TRICORE_REG_DPR1_U, + UC_TRICORE_REG_DPR2_U, + UC_TRICORE_REG_DPR3_U, + UC_TRICORE_REG_DPR0_L, + UC_TRICORE_REG_DPR1_L, + UC_TRICORE_REG_DPR2_L, + UC_TRICORE_REG_DPR3_L, + + UC_TRICORE_REG_CPR0_U, + UC_TRICORE_REG_CPR1_U, + UC_TRICORE_REG_CPR2_U, + UC_TRICORE_REG_CPR3_U, + UC_TRICORE_REG_CPR0_L, + UC_TRICORE_REG_CPR1_L, + UC_TRICORE_REG_CPR2_L, + UC_TRICORE_REG_CPR3_L, + + UC_TRICORE_REG_DPM0, + UC_TRICORE_REG_DPM1, + UC_TRICORE_REG_DPM2, + UC_TRICORE_REG_DPM3, + + UC_TRICORE_REG_CPM0, + UC_TRICORE_REG_CPM1, + UC_TRICORE_REG_CPM2, + UC_TRICORE_REG_CPM3, + + /* Memory Management Registers */ + UC_TRICORE_REG_MMU_CON, + UC_TRICORE_REG_MMU_ASI, + UC_TRICORE_REG_MMU_TVA, + UC_TRICORE_REG_MMU_TPA, + UC_TRICORE_REG_MMU_TPX, + UC_TRICORE_REG_MMU_TFA, + + // 1.3.1 Only + UC_TRICORE_REG_BMACON, + UC_TRICORE_REG_SMACON, + UC_TRICORE_REG_DIEAR, + UC_TRICORE_REG_DIETR, + UC_TRICORE_REG_CCDIER, + UC_TRICORE_REG_MIECON, + UC_TRICORE_REG_PIEAR, + UC_TRICORE_REG_PIETR, + UC_TRICORE_REG_CCPIER, + + /* Debug Registers */ + UC_TRICORE_REG_DBGSR, + UC_TRICORE_REG_EXEVT, + UC_TRICORE_REG_CREVT, + UC_TRICORE_REG_SWEVT, + UC_TRICORE_REG_TR0EVT, + UC_TRICORE_REG_TR1EVT, + UC_TRICORE_REG_DMS, + UC_TRICORE_REG_DCX, + 
UC_TRICORE_REG_DBGTCR, + UC_TRICORE_REG_CCTRL, + UC_TRICORE_REG_CCNT, + UC_TRICORE_REG_ICNT, + UC_TRICORE_REG_M1CNT, + UC_TRICORE_REG_M2CNT, + UC_TRICORE_REG_M3CNT, + + UC_TRICORE_REG_ENDING, // <-- mark the end of the list of registers + + // alias registers + UC_TRICORE_REG_GA0 = UC_TRICORE_REG_A0, + UC_TRICORE_REG_GA1 = UC_TRICORE_REG_A1, + UC_TRICORE_REG_GA8 = UC_TRICORE_REG_A8, + UC_TRICORE_REG_GA9 = UC_TRICORE_REG_A9, + UC_TRICORE_REG_SP = UC_TRICORE_REG_A10, + UC_TRICORE_REG_LR = UC_TRICORE_REG_A11, + UC_TRICORE_REG_IA = UC_TRICORE_REG_A15, + UC_TRICORE_REG_ID = UC_TRICORE_REG_D15, +} uc_tricore_reg; + +#ifdef __cplusplus +} +#endif + +#endif \ No newline at end of file diff --git a/include/unicorn/unicorn.h b/include/unicorn/unicorn.h index 550756fb..2fd07988 100644 --- a/include/unicorn/unicorn.h +++ b/include/unicorn/unicorn.h @@ -35,6 +35,7 @@ typedef size_t uc_hook; #include "ppc.h" #include "riscv.h" #include "s390x.h" +#include "tricore.h" #ifdef __GNUC__ #define DEFAULT_VISIBILITY __attribute__((visibility("default"))) @@ -103,6 +104,7 @@ typedef enum uc_arch { UC_ARCH_M68K, // M68K architecture UC_ARCH_RISCV, // RISCV architecture UC_ARCH_S390X, // S390X architecture + UC_ARCH_TRICORE, // TriCore architecture UC_ARCH_MAX, } uc_arch; diff --git a/msvc/tricore-softmmu/config-target.h b/msvc/tricore-softmmu/config-target.h new file mode 100644 index 00000000..0f9e33ae --- /dev/null +++ b/msvc/tricore-softmmu/config-target.h @@ -0,0 +1,5 @@ +/* Automatically generated by create_config - do not modify */ +#define TARGET_TRICORE 1 +#define TARGET_NAME "tricore" +#define TARGET_TRICORE 1 +#define CONFIG_SOFTMMU 1 diff --git a/qemu/configure b/qemu/configure index 630953b2..80080d0d 100755 --- a/qemu/configure +++ b/qemu/configure @@ -489,6 +489,8 @@ elif check_define __arm__ ; then cpu="arm" elif check_define __aarch64__ ; then cpu="aarch64" +elif check_define __tricore__ ; then + cpu="tricore" else cpu=$(uname -m) fi @@ -528,6 +530,10 @@ case "$cpu" in 
cpu="sparc" supported_cpu="yes" ;; + tricore) + cpu="tricore" + supported_cpu="yes" + ;; *) # This will result in either an error or falling back to TCI later ARCH=unknown @@ -852,7 +858,8 @@ QEMU_CFLAGS="$CPU_CFLAGS $QEMU_CFLAGS" default_target_list="aarch64-softmmu \ arm-softmmu m68k-softmmu mips64el-softmmu mips64-softmmu mipsel-softmmu \ mips-softmmu ppc64-softmmu ppc-softmmu sparc64-softmmu sparc-softmmu \ - x86_64-softmmu riscv32-softmmu riscv64-softmmu s390x-softmmu" + x86_64-softmmu riscv32-softmmu riscv64-softmmu s390x-softmmu \ + tricore-softmmu" if test x"$show_help" = x"yes" ; then cat << EOF @@ -2747,6 +2754,8 @@ case "$target_name" in tilegx) ;; tricore) + TARGET_ARCH=tricore + TARGET_BASE_ARCH=tricore ;; unicore32) ;; diff --git a/qemu/include/tcg/tcg.h b/qemu/include/tcg/tcg.h index 33acbd7c..f3643fe3 100644 --- a/qemu/include/tcg/tcg.h +++ b/qemu/include/tcg/tcg.h @@ -794,6 +794,12 @@ struct TCGContext { TCGv NULL_QREG; /* Used to distinguish stores from bad addressing modes. */ TCGv store_dummy; + + // target/tricore/translate.c + TCGv_i32 cpu_gpr_a[16]; + TCGv_i32 cpu_gpr_d[16]; + TCGv_i32 cpu_PSW_C, cpu_PSW_V, cpu_PSW_SV, cpu_PSW_AV, cpu_PSW_SAV; + TCGv_i32 cpu_PC, cpu_PCXI, cpu_PSW, cpu_ICR; // Used to store the start of current instrution. uint64_t pc_start; diff --git a/qemu/target/tricore/cpu-param.h b/qemu/target/tricore/cpu-param.h new file mode 100644 index 00000000..cf5d9af8 --- /dev/null +++ b/qemu/target/tricore/cpu-param.h @@ -0,0 +1,17 @@ +/* + * TriCore cpu parameters for qemu. 
+ * + * Copyright (c) 2012-2014 Bastian Koppelmann C-Lab/University Paderborn + * SPDX-License-Identifier: LGPL-2.1+ + */ + +#ifndef TRICORE_CPU_PARAM_H +#define TRICORE_CPU_PARAM_H 1 + +#define TARGET_LONG_BITS 32 +#define TARGET_PAGE_BITS 14 +#define TARGET_PHYS_ADDR_SPACE_BITS 32 +#define TARGET_VIRT_ADDR_SPACE_BITS 32 +#define NB_MMU_MODES 3 + +#endif diff --git a/qemu/target/tricore/cpu-qom.h b/qemu/target/tricore/cpu-qom.h new file mode 100644 index 00000000..a8e36b14 --- /dev/null +++ b/qemu/target/tricore/cpu-qom.h @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2012-2014 Bastian Koppelmann C-Lab/University Paderborn + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . 
+ */ + +/* + Modified for Unicorn Engine by Eric Poole , 2022 + Copyright 2022 Aptiv +*/ + +#ifndef QEMU_TRICORE_CPU_QOM_H +#define QEMU_TRICORE_CPU_QOM_H + +#include "hw/core/cpu.h" + +#define TYPE_TRICORE_CPU "tricore-cpu" + +#define TRICORE_CPU(obj) ((TriCoreCPU *)obj) +#define TRICORE_CPU_CLASS(klass) ((TriCoreCPUClass *)klass) +#define TRICORE_CPU_GET_CLASS(obj) (&((TriCoreCPU *)obj)->cc) + +typedef struct TriCoreCPUClass { + /*< private >*/ + CPUClass parent_class; + /*< public >*/ + + void (*parent_reset)(CPUState *cpu); +} TriCoreCPUClass; + + +#endif /* QEMU_TRICORE_CPU_QOM_H */ diff --git a/qemu/target/tricore/cpu.c b/qemu/target/tricore/cpu.c new file mode 100644 index 00000000..a55ca63e --- /dev/null +++ b/qemu/target/tricore/cpu.c @@ -0,0 +1,205 @@ +/* + * TriCore emulation for qemu: main translation routines. + * + * Copyright (c) 2012-2014 Bastian Koppelmann C-Lab/University Paderborn + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . 
+ */ + +/* + Modified for Unicorn Engine by Eric Poole , 2022 + Copyright 2022 Aptiv +*/ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "cpu-qom.h" +#include "exec/exec-all.h" + +#include + +static inline void set_feature(CPUTriCoreState *env, int feature) +{ + env->features |= 1ULL << feature; +} + +static void tricore_cpu_set_pc(CPUState *cs, vaddr value) +{ + TriCoreCPU *cpu = TRICORE_CPU(cs); + CPUTriCoreState *env = &cpu->env; + + env->PC = value & ~(target_ulong)1; +} + +static void tricore_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb) +{ + TriCoreCPU *cpu = TRICORE_CPU(cs); + CPUTriCoreState *env = &cpu->env; + + env->PC = tb->pc; +} + +static void tricore_cpu_reset(CPUState *dev) +{ + CPUState *s = CPU(dev); + TriCoreCPU *cpu = TRICORE_CPU(s); + TriCoreCPUClass *tcc = TRICORE_CPU_GET_CLASS(cpu); + CPUTriCoreState *env = &cpu->env; + + tcc->parent_reset(dev); + + memset(env, 0, offsetof(CPUTriCoreState, end_reset_fields)); + + cpu_state_reset(env); +} + +static bool tricore_cpu_has_work(CPUState *cs) +{ + return true; +} + +static void tricore_cpu_realizefn(CPUState *dev) +{ + CPUState *cs = CPU(dev); + TriCoreCPU *cpu = TRICORE_CPU(dev); + CPUTriCoreState *env = &cpu->env; + + cpu_exec_realizefn(cs); + + /* Some features automatically imply others */ + if (tricore_feature(env, TRICORE_FEATURE_161)) { + set_feature(env, TRICORE_FEATURE_16); + } + + if (tricore_feature(env, TRICORE_FEATURE_16)) { + set_feature(env, TRICORE_FEATURE_131); + } + if (tricore_feature(env, TRICORE_FEATURE_131)) { + set_feature(env, TRICORE_FEATURE_13); + } + cpu_reset(cs); +} + + +static void tricore_cpu_initfn(struct uc_struct *uc, CPUState *obj) +{ + TriCoreCPU *cpu = TRICORE_CPU(obj); + CPUTriCoreState *env = &cpu->env; + + env->uc = uc; + cpu_set_cpustate_pointers(cpu); +} + +static void tc1796_initfn(CPUState *obj) +{ + TriCoreCPU *cpu = TRICORE_CPU(obj); + + set_feature(&cpu->env, TRICORE_FEATURE_13); +} + +static void tc1797_initfn(CPUState *obj) +{ + 
TriCoreCPU *cpu = TRICORE_CPU(obj); + + set_feature(&cpu->env, TRICORE_FEATURE_131); +} + +static void tc27x_initfn(CPUState *obj) +{ + TriCoreCPU *cpu = TRICORE_CPU(obj); + + set_feature(&cpu->env, TRICORE_FEATURE_161); +} + +static void tricore_cpu_class_init(CPUClass *c) +{ + TriCoreCPUClass *mcc = TRICORE_CPU_CLASS(c); + CPUClass *cc = CPU_CLASS(c); + + /* parent class is CPUClass, parent_reset() is cpu_common_reset(). */ + mcc->parent_reset = cc->reset; + + cc->reset = tricore_cpu_reset; + cc->has_work = tricore_cpu_has_work; + cc->set_pc = tricore_cpu_set_pc; + + cc->synchronize_from_tb = tricore_cpu_synchronize_from_tb; + cc->get_phys_page_debug = tricore_cpu_get_phys_page_debug; + + cc->tlb_fill = tricore_cpu_tlb_fill; + cc->tcg_initialize = tricore_tcg_init; +} + +#define DEFINE_TRICORE_CPU_TYPE(cpu_model, initfn) \ + { \ + .parent = TYPE_TRICORE_CPU, \ + .initfn = initfn, \ + .name = TRICORE_CPU_TYPE_NAME(cpu_model), \ + } + +struct TriCoreCPUInfo { + const char *name; + void (*initfn)(CPUState *obj); +}; + +static struct TriCoreCPUInfo tricore_cpus_type_infos[] = { + { "tc1796", tc1796_initfn }, + { "tc1797", tc1797_initfn }, + { "tc27x", tc27x_initfn }, +}; + +TriCoreCPU *cpu_tricore_init(struct uc_struct *uc) +{ + TriCoreCPU *cpu; + CPUState *cs; + CPUClass *cc; + + cpu = calloc(1, sizeof(*cpu)); + if (cpu == NULL) { + return NULL; + } + + if (uc->cpu_model == INT_MAX) { + uc->cpu_model = 2; // tc27x + } else if (uc->cpu_model >= ARRAY_SIZE(tricore_cpus_type_infos)) { + free(cpu); + return NULL; + } + + cs = (CPUState *)cpu; + cc = (CPUClass *)&cpu->cc; + cs->cc = cc; + cs->uc = uc; + uc->cpu = cs; + + cpu_class_init(uc, cc); + + tricore_cpu_class_init(cc); + + cpu_common_initfn(uc, cs); + + tricore_cpu_initfn(uc, cs); + + tricore_cpus_type_infos[uc->cpu_model].initfn(cs); + + tricore_cpu_realizefn(cs); + + // init address space + cpu_address_space_init(cs, 0, cs->memory); + + qemu_init_vcpu(cs); + + return cpu; +} + diff --git 
a/qemu/target/tricore/cpu.h b/qemu/target/tricore/cpu.h new file mode 100644 index 00000000..83b2823e --- /dev/null +++ b/qemu/target/tricore/cpu.h @@ -0,0 +1,410 @@ +/* + * TriCore emulation for qemu: main CPU struct. + * + * Copyright (c) 2012-2014 Bastian Koppelmann C-Lab/University Paderborn + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ + +/* + Modified for Unicorn Engine by Eric Poole , 2022 + Copyright 2022 Aptiv +*/ + +#ifndef TRICORE_CPU_H +#define TRICORE_CPU_H + +#include "cpu-qom.h" +#include "exec/cpu-defs.h" +#include "tricore-defs.h" + +struct tricore_boot_info; + +typedef struct tricore_def_t tricore_def_t; + +// struct CPUTriCoreState { +typedef struct CPUTriCoreState { + /* GPR Register */ + uint32_t gpr_a[16]; + uint32_t gpr_d[16]; + /* CSFR Register */ + uint32_t PCXI; +/* Frequently accessed PSW_USB bits are stored separately for efficiency. + This contains all the other bits. Use psw_{read,write} to access + the whole PSW. */ + uint32_t PSW; + + /* PSW flag cache for faster execution + */ + uint32_t PSW_USB_C; + uint32_t PSW_USB_V; /* Only if bit 31 set, then flag is set */ + uint32_t PSW_USB_SV; /* Only if bit 31 set, then flag is set */ + uint32_t PSW_USB_AV; /* Only if bit 31 set, then flag is set. */ + uint32_t PSW_USB_SAV; /* Only if bit 31 set, then flag is set. 
*/ + + uint32_t PC; + uint32_t SYSCON; + uint32_t CPU_ID; + uint32_t CORE_ID; + uint32_t BIV; + uint32_t BTV; + uint32_t ISP; + uint32_t ICR; + uint32_t FCX; + uint32_t LCX; + uint32_t COMPAT; + + /* Mem Protection Register */ + uint32_t DPR0_0L; + uint32_t DPR0_0U; + uint32_t DPR0_1L; + uint32_t DPR0_1U; + uint32_t DPR0_2L; + uint32_t DPR0_2U; + uint32_t DPR0_3L; + uint32_t DPR0_3U; + + uint32_t DPR1_0L; + uint32_t DPR1_0U; + uint32_t DPR1_1L; + uint32_t DPR1_1U; + uint32_t DPR1_2L; + uint32_t DPR1_2U; + uint32_t DPR1_3L; + uint32_t DPR1_3U; + + uint32_t DPR2_0L; + uint32_t DPR2_0U; + uint32_t DPR2_1L; + uint32_t DPR2_1U; + uint32_t DPR2_2L; + uint32_t DPR2_2U; + uint32_t DPR2_3L; + uint32_t DPR2_3U; + + uint32_t DPR3_0L; + uint32_t DPR3_0U; + uint32_t DPR3_1L; + uint32_t DPR3_1U; + uint32_t DPR3_2L; + uint32_t DPR3_2U; + uint32_t DPR3_3L; + uint32_t DPR3_3U; + + uint32_t CPR0_0L; + uint32_t CPR0_0U; + uint32_t CPR0_1L; + uint32_t CPR0_1U; + uint32_t CPR0_2L; + uint32_t CPR0_2U; + uint32_t CPR0_3L; + uint32_t CPR0_3U; + + uint32_t CPR1_0L; + uint32_t CPR1_0U; + uint32_t CPR1_1L; + uint32_t CPR1_1U; + uint32_t CPR1_2L; + uint32_t CPR1_2U; + uint32_t CPR1_3L; + uint32_t CPR1_3U; + + uint32_t CPR2_0L; + uint32_t CPR2_0U; + uint32_t CPR2_1L; + uint32_t CPR2_1U; + uint32_t CPR2_2L; + uint32_t CPR2_2U; + uint32_t CPR2_3L; + uint32_t CPR2_3U; + + uint32_t CPR3_0L; + uint32_t CPR3_0U; + uint32_t CPR3_1L; + uint32_t CPR3_1U; + uint32_t CPR3_2L; + uint32_t CPR3_2U; + uint32_t CPR3_3L; + uint32_t CPR3_3U; + + uint32_t DPM0; + uint32_t DPM1; + uint32_t DPM2; + uint32_t DPM3; + + uint32_t CPM0; + uint32_t CPM1; + uint32_t CPM2; + uint32_t CPM3; + + /* Memory Management Registers */ + uint32_t MMU_CON; + uint32_t MMU_ASI; + uint32_t MMU_TVA; + uint32_t MMU_TPA; + uint32_t MMU_TPX; + uint32_t MMU_TFA; + /* {1.3.1 only */ + uint32_t BMACON; + uint32_t SMACON; + uint32_t DIEAR; + uint32_t DIETR; + uint32_t CCDIER; + uint32_t MIECON; + uint32_t PIEAR; + uint32_t PIETR; + uint32_t 
CCPIER; + /*} */ + /* Debug Registers */ + uint32_t DBGSR; + uint32_t EXEVT; + uint32_t CREVT; + uint32_t SWEVT; + uint32_t TR0EVT; + uint32_t TR1EVT; + uint32_t DMS; + uint32_t DCX; + uint32_t DBGTCR; + uint32_t CCTRL; + uint32_t CCNT; + uint32_t ICNT; + uint32_t M1CNT; + uint32_t M2CNT; + uint32_t M3CNT; + /* Floating Point Registers */ + float_status fp_status; + /* QEMU */ + int error_code; + uint32_t hflags; /* CPU State */ + + const tricore_def_t *cpu_model; + void *irq[8]; + struct QEMUTimer *timer; /* Internal timer */ + + /* Fields up to this point are cleared by a CPU reset */ + int end_reset_fields; + + /* Fields from here on are preserved across CPU reset. */ + uint32_t features; + + // Unicorn engine + struct uc_struct *uc; +} CPUTriCoreState; + +/** + * TriCoreCPU: + * @env: #CPUTriCoreState + * + * A TriCore CPU. + */ +// TODO: Why is the type def needed? Without it the later typedef fails to find this... ? +typedef struct TriCoreCPU { + /*< private >*/ + CPUState parent_obj; + /*< public >*/ + + CPUNegativeOffsetState neg; + CPUTriCoreState env; + + struct TriCoreCPUClass cc; +} TriCoreCPU; + +hwaddr tricore_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); +void tricore_cpu_dump_state(CPUState *cpu, FILE *f, int flags); + + +#define MASK_PCXI_PCPN 0xff000000 +#define MASK_PCXI_PIE_1_3 0x00800000 +#define MASK_PCXI_PIE_1_6 0x00200000 +#define MASK_PCXI_UL 0x00400000 +#define MASK_PCXI_PCXS 0x000f0000 +#define MASK_PCXI_PCXO 0x0000ffff + +#define MASK_PSW_USB 0xff000000 +#define MASK_USB_C 0x80000000 +#define MASK_USB_V 0x40000000 +#define MASK_USB_SV 0x20000000 +#define MASK_USB_AV 0x10000000 +#define MASK_USB_SAV 0x08000000 +#define MASK_PSW_PRS 0x00003000 +#define MASK_PSW_IO 0x00000c00 +#define MASK_PSW_IS 0x00000200 +#define MASK_PSW_GW 0x00000100 +#define MASK_PSW_CDE 0x00000080 +#define MASK_PSW_CDC 0x0000007f +#define MASK_PSW_FPU_RM 0x3000000 + +#define MASK_SYSCON_PRO_TEN 0x2 +#define MASK_SYSCON_FCD_SF 0x1 + +#define MASK_CPUID_MOD 
0xffff0000 +#define MASK_CPUID_MOD_32B 0x0000ff00 +#define MASK_CPUID_REV 0x000000ff + +#define MASK_ICR_PIPN 0x00ff0000 +#define MASK_ICR_IE_1_3 0x00000100 +#define MASK_ICR_IE_1_6 0x00008000 +#define MASK_ICR_CCPN 0x000000ff + +#define MASK_FCX_FCXS 0x000f0000 +#define MASK_FCX_FCXO 0x0000ffff + +#define MASK_LCX_LCXS 0x000f0000 +#define MASK_LCX_LCX0 0x0000ffff + +#define MASK_DBGSR_DE 0x1 +#define MASK_DBGSR_HALT 0x6 +#define MASK_DBGSR_SUSP 0x10 +#define MASK_DBGSR_PREVSUSP 0x20 +#define MASK_DBGSR_PEVT 0x40 +#define MASK_DBGSR_EVTSRC 0x1f00 + +#define TRICORE_HFLAG_KUU 0x3 +#define TRICORE_HFLAG_UM0 0x00002 /* user mode-0 flag */ +#define TRICORE_HFLAG_UM1 0x00001 /* user mode-1 flag */ +#define TRICORE_HFLAG_SM 0x00000 /* kernel mode flag */ + +enum tricore_features { + TRICORE_FEATURE_13, + TRICORE_FEATURE_131, + TRICORE_FEATURE_16, + TRICORE_FEATURE_161, +}; + +static inline int tricore_feature(CPUTriCoreState *env, int feature) +{ + return (env->features & (1ULL << feature)) != 0; +} + +/* TriCore Traps Classes*/ +enum { + TRAPC_NONE = -1, + TRAPC_MMU = 0, + TRAPC_PROT = 1, + TRAPC_INSN_ERR = 2, + TRAPC_CTX_MNG = 3, + TRAPC_SYSBUS = 4, + TRAPC_ASSERT = 5, + TRAPC_SYSCALL = 6, + TRAPC_NMI = 7, + TRAPC_IRQ = 8 +}; + +/* Class 0 TIN */ +enum { + TIN0_VAF = 0, + TIN0_VAP = 1, +}; + +/* Class 1 TIN */ +enum { + TIN1_PRIV = 1, + TIN1_MPR = 2, + TIN1_MPW = 3, + TIN1_MPX = 4, + TIN1_MPP = 5, + TIN1_MPN = 6, + TIN1_GRWP = 7, +}; + +/* Class 2 TIN */ +enum { + TIN2_IOPC = 1, + TIN2_UOPC = 2, + TIN2_OPD = 3, + TIN2_ALN = 4, + TIN2_MEM = 5, +}; + +/* Class 3 TIN */ +enum { + TIN3_FCD = 1, + TIN3_CDO = 2, + TIN3_CDU = 3, + TIN3_FCU = 4, + TIN3_CSU = 5, + TIN3_CTYP = 6, + TIN3_NEST = 7, +}; + +/* Class 4 TIN */ +enum { + TIN4_PSE = 1, + TIN4_DSE = 2, + TIN4_DAE = 3, + TIN4_CAE = 4, + TIN4_PIE = 5, + TIN4_DIE = 6, +}; + +/* Class 5 TIN */ +enum { + TIN5_OVF = 1, + TIN5_SOVF = 1, +}; + +/* Class 6 TIN + * + * Is always TIN6_SYS + */ + +/* Class 7 TIN */ +enum { + 
TIN7_NMI = 0, +}; + +uint32_t psw_read(CPUTriCoreState *env); +void psw_write(CPUTriCoreState *env, uint32_t val); +int tricore_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n); +int tricore_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n); + +void fpu_set_state(CPUTriCoreState *env); + +#define MMU_USER_IDX 2 + +void tricore_cpu_list(void); + +#define cpu_list tricore_cpu_list + +static inline int cpu_mmu_index(CPUTriCoreState *env, bool ifetch) +{ + return 0; +} + +typedef CPUTriCoreState CPUArchState; +typedef TriCoreCPU ArchCPU; + +#include "exec/cpu-all.h" + +void cpu_state_reset(CPUTriCoreState *s); +void tricore_tcg_init(struct uc_struct *uc); + +static inline void cpu_get_tb_cpu_state(CPUTriCoreState *env, target_ulong *pc, + target_ulong *cs_base, uint32_t *flags) +{ + *pc = env->PC; + *cs_base = 0; + *flags = 0; +} + +#define TRICORE_CPU_TYPE_SUFFIX "-" TYPE_TRICORE_CPU +#define TRICORE_CPU_TYPE_NAME(model) model TRICORE_CPU_TYPE_SUFFIX +#define CPU_RESOLVING_TYPE TYPE_TRICORE_CPU + +/* helpers.c */ +bool tricore_cpu_tlb_fill(CPUState *cs, vaddr address, int size, + MMUAccessType access_type, int mmu_idx, + bool probe, uintptr_t retaddr); + +#endif /* TRICORE_CPU_H */ diff --git a/qemu/target/tricore/csfr.def b/qemu/target/tricore/csfr.def new file mode 100644 index 00000000..ff004cbd --- /dev/null +++ b/qemu/target/tricore/csfr.def @@ -0,0 +1,125 @@ +/* A(ll) access permited + R(ead only) access + E(nd init protected) access + + A|R|E(offset, register, feature introducing reg) + + NOTE: PSW is handled as a special case in gen_mtcr/mfcr */ + +A(0xfe00, PCXI, TRICORE_FEATURE_13) +A(0xfe08, PC, TRICORE_FEATURE_13) +A(0xfe14, SYSCON, TRICORE_FEATURE_13) +R(0xfe18, CPU_ID, TRICORE_FEATURE_13) +R(0xfe1c, CORE_ID, TRICORE_FEATURE_161) +E(0xfe20, BIV, TRICORE_FEATURE_13) +E(0xfe24, BTV, TRICORE_FEATURE_13) +E(0xfe28, ISP, TRICORE_FEATURE_13) +A(0xfe2c, ICR, TRICORE_FEATURE_13) +A(0xfe38, FCX, TRICORE_FEATURE_13) +A(0xfe3c, LCX, 
TRICORE_FEATURE_13) +E(0x9400, COMPAT, TRICORE_FEATURE_131) +/* memory protection register */ +A(0xC000, DPR0_0L, TRICORE_FEATURE_13) +A(0xC004, DPR0_0U, TRICORE_FEATURE_13) +A(0xC008, DPR0_1L, TRICORE_FEATURE_13) +A(0xC00C, DPR0_1U, TRICORE_FEATURE_13) +A(0xC010, DPR0_2L, TRICORE_FEATURE_13) +A(0xC014, DPR0_2U, TRICORE_FEATURE_13) +A(0xC018, DPR0_3L, TRICORE_FEATURE_13) +A(0xC01C, DPR0_3U, TRICORE_FEATURE_13) +A(0xC400, DPR1_0L, TRICORE_FEATURE_13) +A(0xC404, DPR1_0U, TRICORE_FEATURE_13) +A(0xC408, DPR1_1L, TRICORE_FEATURE_13) +A(0xC40C, DPR1_1U, TRICORE_FEATURE_13) +A(0xC410, DPR1_2L, TRICORE_FEATURE_13) +A(0xC414, DPR1_2U, TRICORE_FEATURE_13) +A(0xC418, DPR1_3L, TRICORE_FEATURE_13) +A(0xC41C, DPR1_3U, TRICORE_FEATURE_13) +A(0xC800, DPR2_0L, TRICORE_FEATURE_13) +A(0xC804, DPR2_0U, TRICORE_FEATURE_13) +A(0xC808, DPR2_1L, TRICORE_FEATURE_13) +A(0xC80C, DPR2_1U, TRICORE_FEATURE_13) +A(0xC810, DPR2_2L, TRICORE_FEATURE_13) +A(0xC814, DPR2_2U, TRICORE_FEATURE_13) +A(0xC818, DPR2_3L, TRICORE_FEATURE_13) +A(0xC81C, DPR2_3U, TRICORE_FEATURE_13) +A(0xCC00, DPR3_0L, TRICORE_FEATURE_13) +A(0xCC04, DPR3_0U, TRICORE_FEATURE_13) +A(0xCC08, DPR3_1L, TRICORE_FEATURE_13) +A(0xCC0C, DPR3_1U, TRICORE_FEATURE_13) +A(0xCC10, DPR3_2L, TRICORE_FEATURE_13) +A(0xCC14, DPR3_2U, TRICORE_FEATURE_13) +A(0xCC18, DPR3_3L, TRICORE_FEATURE_13) +A(0xCC1C, DPR3_3U, TRICORE_FEATURE_13) +A(0xD000, CPR0_0L, TRICORE_FEATURE_13) +A(0xD004, CPR0_0U, TRICORE_FEATURE_13) +A(0xD008, CPR0_1L, TRICORE_FEATURE_13) +A(0xD00C, CPR0_1U, TRICORE_FEATURE_13) +A(0xD010, CPR0_2L, TRICORE_FEATURE_13) +A(0xD014, CPR0_2U, TRICORE_FEATURE_13) +A(0xD018, CPR0_3L, TRICORE_FEATURE_13) +A(0xD01C, CPR0_3U, TRICORE_FEATURE_13) +A(0xD400, CPR1_0L, TRICORE_FEATURE_13) +A(0xD404, CPR1_0U, TRICORE_FEATURE_13) +A(0xD408, CPR1_1L, TRICORE_FEATURE_13) +A(0xD40C, CPR1_1U, TRICORE_FEATURE_13) +A(0xD410, CPR1_2L, TRICORE_FEATURE_13) +A(0xD414, CPR1_2U, TRICORE_FEATURE_13) +A(0xD418, CPR1_3L, TRICORE_FEATURE_13) +A(0xD41C, CPR1_3U, 
TRICORE_FEATURE_13) +A(0xD800, CPR2_0L, TRICORE_FEATURE_13) +A(0xD804, CPR2_0U, TRICORE_FEATURE_13) +A(0xD808, CPR2_1L, TRICORE_FEATURE_13) +A(0xD80C, CPR2_1U, TRICORE_FEATURE_13) +A(0xD810, CPR2_2L, TRICORE_FEATURE_13) +A(0xD814, CPR2_2U, TRICORE_FEATURE_13) +A(0xD818, CPR2_3L, TRICORE_FEATURE_13) +A(0xD81C, CPR2_3U, TRICORE_FEATURE_13) +A(0xDC00, CPR3_0L, TRICORE_FEATURE_13) +A(0xDC04, CPR3_0U, TRICORE_FEATURE_13) +A(0xDC08, CPR3_1L, TRICORE_FEATURE_13) +A(0xDC0C, CPR3_1U, TRICORE_FEATURE_13) +A(0xDC10, CPR3_2L, TRICORE_FEATURE_13) +A(0xDC14, CPR3_2U, TRICORE_FEATURE_13) +A(0xDC18, CPR3_3L, TRICORE_FEATURE_13) +A(0xDC1C, CPR3_3U, TRICORE_FEATURE_13) +A(0xE000, DPM0, TRICORE_FEATURE_13) +A(0xE080, DPM1, TRICORE_FEATURE_13) +A(0xE100, DPM2, TRICORE_FEATURE_13) +A(0xE180, DPM3, TRICORE_FEATURE_13) +A(0xE200, CPM0, TRICORE_FEATURE_13) +A(0xE280, CPM1, TRICORE_FEATURE_13) +A(0xE300, CPM2, TRICORE_FEATURE_13) +A(0xE380, CPM3, TRICORE_FEATURE_13) +/* memory management registers */ +A(0x8000, MMU_CON, TRICORE_FEATURE_13) +A(0x8004, MMU_ASI, TRICORE_FEATURE_13) +A(0x800C, MMU_TVA, TRICORE_FEATURE_13) +A(0x8010, MMU_TPA, TRICORE_FEATURE_13) +A(0x8014, MMU_TPX, TRICORE_FEATURE_13) +A(0x8018, MMU_TFA, TRICORE_FEATURE_13) +E(0x9004, BMACON, TRICORE_FEATURE_131) +E(0x900C, SMACON, TRICORE_FEATURE_131) +A(0x9020, DIEAR, TRICORE_FEATURE_131) +A(0x9024, DIETR, TRICORE_FEATURE_131) +A(0x9028, CCDIER, TRICORE_FEATURE_131) +E(0x9044, MIECON, TRICORE_FEATURE_131) +A(0x9210, PIEAR, TRICORE_FEATURE_131) +A(0x9214, PIETR, TRICORE_FEATURE_131) +A(0x9218, CCPIER, TRICORE_FEATURE_131) +/* debug registers */ +A(0xFD00, DBGSR, TRICORE_FEATURE_13) +A(0xFD08, EXEVT, TRICORE_FEATURE_13) +A(0xFD0C, CREVT, TRICORE_FEATURE_13) +A(0xFD10, SWEVT, TRICORE_FEATURE_13) +A(0xFD20, TR0EVT, TRICORE_FEATURE_13) +A(0xFD24, TR1EVT, TRICORE_FEATURE_13) +A(0xFD40, DMS, TRICORE_FEATURE_13) +A(0xFD44, DCX, TRICORE_FEATURE_13) +A(0xFD48, DBGTCR, TRICORE_FEATURE_131) +A(0xFC00, CCTRL, TRICORE_FEATURE_131) 
+A(0xFC04, CCNT, TRICORE_FEATURE_131) +A(0xFC08, ICNT, TRICORE_FEATURE_131) +A(0xFC0C, M1CNT, TRICORE_FEATURE_131) +A(0xFC10, M2CNT, TRICORE_FEATURE_131) +A(0xFC14, M3CNT, TRICORE_FEATURE_131) diff --git a/qemu/target/tricore/fpu_helper.c b/qemu/target/tricore/fpu_helper.c new file mode 100644 index 00000000..48767d51 --- /dev/null +++ b/qemu/target/tricore/fpu_helper.c @@ -0,0 +1,478 @@ +/* + * TriCore emulation for qemu: fpu helper. + * + * Copyright (c) 2016 Bastian Koppelmann University of Paderborn + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . 
+ */ + +/* + Modified for Unicorn Engine by Eric Poole , 2022 + Copyright 2022 Aptiv +*/ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "qemu/host-utils.h" +#include "exec/exec-all.h" +#include "exec/helper-proto.h" +#include "fpu/softfloat.h" + +#define QUIET_NAN 0x7fc00000 +#define ADD_NAN 0x7fc00001 +#define SQRT_NAN 0x7fc00004 +#define DIV_NAN 0x7fc00008 +#define MUL_NAN 0x7fc00002 +#define FPU_FS PSW_USB_C +#define FPU_FI PSW_USB_V +#define FPU_FV PSW_USB_SV +#define FPU_FZ PSW_USB_AV +#define FPU_FU PSW_USB_SAV + +#define float32_sqrt_nan make_float32(SQRT_NAN) +#define float32_quiet_nan make_float32(QUIET_NAN) + +/* we don't care about input_denormal */ +static inline uint8_t f_get_excp_flags(CPUTriCoreState *env) +{ + return get_float_exception_flags(&env->fp_status) + & (float_flag_invalid + | float_flag_overflow + | float_flag_underflow + | float_flag_output_denormal + | float_flag_divbyzero + | float_flag_inexact); +} + +static inline float32 f_maddsub_nan_result(float32 arg1, float32 arg2, + float32 arg3, float32 result, + uint32_t muladd_negate_c) +{ + uint32_t aSign, bSign, cSign; + uint32_t aExp, bExp, cExp; + + if (float32_is_any_nan(arg1) || float32_is_any_nan(arg2) || + float32_is_any_nan(arg3)) { + return QUIET_NAN; + } else if (float32_is_infinity(arg1) && float32_is_zero(arg2)) { + return MUL_NAN; + } else if (float32_is_zero(arg1) && float32_is_infinity(arg2)) { + return MUL_NAN; + } else { + aSign = arg1 >> 31; + bSign = arg2 >> 31; + cSign = arg3 >> 31; + + aExp = (arg1 >> 23) & 0xff; + bExp = (arg2 >> 23) & 0xff; + cExp = (arg3 >> 23) & 0xff; + + if (muladd_negate_c) { + cSign ^= 1; + } + if (((aExp == 0xff) || (bExp == 0xff)) && (cExp == 0xff)) { + if (aSign ^ bSign ^ cSign) { + return ADD_NAN; + } + } + } + + return result; +} + +static void f_update_psw_flags(CPUTriCoreState *env, uint8_t flags) +{ + uint8_t some_excp = 0; + set_float_exception_flags(0, &env->fp_status); + + if (flags & float_flag_invalid) { + env->FPU_FI = 1 << 
31; + some_excp = 1; + } + + if (flags & float_flag_overflow) { + env->FPU_FV = 1 << 31; + some_excp = 1; + } + + if (flags & float_flag_underflow || flags & float_flag_output_denormal) { + env->FPU_FU = 1 << 31; + some_excp = 1; + } + + if (flags & float_flag_divbyzero) { + env->FPU_FZ = 1 << 31; + some_excp = 1; + } + + if (flags & float_flag_inexact || flags & float_flag_output_denormal) { + env->PSW |= 1 << 26; + some_excp = 1; + } + + env->FPU_FS = some_excp; +} + +#define FADD_SUB(op) \ +uint32_t helper_f##op(CPUTriCoreState *env, uint32_t r1, uint32_t r2) \ +{ \ + float32 arg1 = make_float32(r1); \ + float32 arg2 = make_float32(r2); \ + uint32_t flags; \ + float32 f_result; \ + \ + f_result = float32_##op(arg2, arg1, &env->fp_status); \ + flags = f_get_excp_flags(env); \ + if (flags) { \ + /* If the output is a NaN, but the inputs aren't, \ + we return a unique value. */ \ + if ((flags & float_flag_invalid) \ + && !float32_is_any_nan(arg1) \ + && !float32_is_any_nan(arg2)) { \ + f_result = ADD_NAN; \ + } \ + f_update_psw_flags(env, flags); \ + } else { \ + env->FPU_FS = 0; \ + } \ + return (uint32_t)f_result; \ +} +FADD_SUB(add) +FADD_SUB(sub) + +uint32_t helper_fmul(CPUTriCoreState *env, uint32_t r1, uint32_t r2) +{ + uint32_t flags; + float32 arg1 = make_float32(r1); + float32 arg2 = make_float32(r2); + float32 f_result; + + f_result = float32_mul(arg1, arg2, &env->fp_status); + + flags = f_get_excp_flags(env); + if (flags) { + /* If the output is a NaN, but the inputs aren't, + we return a unique value. */ + if ((flags & float_flag_invalid) + && !float32_is_any_nan(arg1) + && !float32_is_any_nan(arg2)) { + f_result = MUL_NAN; + } + f_update_psw_flags(env, flags); + } else { + env->FPU_FS = 0; + } + return (uint32_t)f_result; + +} + +/* + * Target TriCore QSEED.F significand Lookup Table + * + * The QSEED.F output significand depends on the least-significant + * exponent bit and the 6 most-significant significand bits. 
+ * + * IEEE 754 float datatype + * partitioned into Sign (S), Exponent (E) and Significand (M): + * + * S E E E E E E E E M M M M M M ... + * | | | + * +------+------+-------+-------+ + * | | + * for lookup table + * calculating index for + * output E output M + * + * This lookup table was extracted by analyzing QSEED output + * from the real hardware + */ +static const uint8_t target_qseed_significand_table[128] = { + 253, 252, 245, 244, 239, 238, 231, 230, 225, 224, 217, 216, + 211, 210, 205, 204, 201, 200, 195, 194, 189, 188, 185, 184, + 179, 178, 175, 174, 169, 168, 165, 164, 161, 160, 157, 156, + 153, 152, 149, 148, 145, 144, 141, 140, 137, 136, 133, 132, + 131, 130, 127, 126, 123, 122, 121, 120, 117, 116, 115, 114, + 111, 110, 109, 108, 103, 102, 99, 98, 93, 92, 89, 88, 83, + 82, 79, 78, 75, 74, 71, 70, 67, 66, 63, 62, 59, 58, 55, + 54, 53, 52, 49, 48, 45, 44, 43, 42, 39, 38, 37, 36, 33, + 32, 31, 30, 27, 26, 25, 24, 23, 22, 19, 18, 17, 16, 15, + 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2 +}; + +uint32_t helper_qseed(CPUTriCoreState *env, uint32_t r1) +{ + uint32_t arg1, S, E, M, E_minus_one, m_idx; + uint32_t new_E, new_M, new_S, result; + + arg1 = make_float32(r1); + + /* fetch IEEE-754 fields S, E and the uppermost 6-bit of M */ + S = extract32(arg1, 31, 1); + E = extract32(arg1, 23, 8); + M = extract32(arg1, 17, 6); + + if (float32_is_any_nan(arg1)) { + result = float32_quiet_nan; + } else if (float32_is_zero_or_denormal(arg1)) { + if (float32_is_neg(arg1)) { + result = float32_infinity | (1 << 31); + } else { + result = float32_infinity; + } + } else if (float32_is_neg(arg1)) { + result = float32_sqrt_nan; + } else if (float32_is_infinity(arg1)) { + result = float32_zero; + } else { + E_minus_one = E - 1; + m_idx = ((E_minus_one & 1) << 6) | M; + new_S = S; + new_E = 0xBD - E_minus_one / 2; + new_M = target_qseed_significand_table[m_idx]; + + result = 0; + result = deposit32(result, 31, 1, new_S); + result = deposit32(result, 23, 8, new_E); + result = 
deposit32(result, 15, 8, new_M); + } + + if (float32_is_signaling_nan(arg1, &env->fp_status) + || result == float32_sqrt_nan) { + env->FPU_FI = 1 << 31; + env->FPU_FS = 1; + } else { + env->FPU_FS = 0; + } + + return (uint32_t) result; +} + +uint32_t helper_fdiv(CPUTriCoreState *env, uint32_t r1, uint32_t r2) +{ + uint32_t flags; + float32 arg1 = make_float32(r1); + float32 arg2 = make_float32(r2); + float32 f_result; + + f_result = float32_div(arg1, arg2 , &env->fp_status); + + flags = f_get_excp_flags(env); + if (flags) { + /* If the output is a NaN, but the inputs aren't, + we return a unique value. */ + if ((flags & float_flag_invalid) + && !float32_is_any_nan(arg1) + && !float32_is_any_nan(arg2)) { + f_result = DIV_NAN; + } + f_update_psw_flags(env, flags); + } else { + env->FPU_FS = 0; + } + + return (uint32_t)f_result; +} + +uint32_t helper_fmadd(CPUTriCoreState *env, uint32_t r1, + uint32_t r2, uint32_t r3) +{ + uint32_t flags; + float32 arg1 = make_float32(r1); + float32 arg2 = make_float32(r2); + float32 arg3 = make_float32(r3); + float32 f_result; + + f_result = float32_muladd(arg1, arg2, arg3, 0, &env->fp_status); + + flags = f_get_excp_flags(env); + if (flags) { + if (flags & float_flag_invalid) { + arg1 = float32_squash_input_denormal(arg1, &env->fp_status); + arg2 = float32_squash_input_denormal(arg2, &env->fp_status); + arg3 = float32_squash_input_denormal(arg3, &env->fp_status); + f_result = f_maddsub_nan_result(arg1, arg2, arg3, f_result, 0); + } + f_update_psw_flags(env, flags); + } else { + env->FPU_FS = 0; + } + return (uint32_t)f_result; +} + +uint32_t helper_fmsub(CPUTriCoreState *env, uint32_t r1, + uint32_t r2, uint32_t r3) +{ + uint32_t flags; + float32 arg1 = make_float32(r1); + float32 arg2 = make_float32(r2); + float32 arg3 = make_float32(r3); + float32 f_result; + + f_result = float32_muladd(arg1, arg2, arg3, float_muladd_negate_product, + &env->fp_status); + + flags = f_get_excp_flags(env); + if (flags) { + if (flags & 
float_flag_invalid) { + arg1 = float32_squash_input_denormal(arg1, &env->fp_status); + arg2 = float32_squash_input_denormal(arg2, &env->fp_status); + arg3 = float32_squash_input_denormal(arg3, &env->fp_status); + + f_result = f_maddsub_nan_result(arg1, arg2, arg3, f_result, 1); + } + f_update_psw_flags(env, flags); + } else { + env->FPU_FS = 0; + } + return (uint32_t)f_result; +} + +uint32_t helper_fcmp(CPUTriCoreState *env, uint32_t r1, uint32_t r2) +{ + uint32_t result, flags; + float32 arg1 = make_float32(r1); + float32 arg2 = make_float32(r2); + + set_flush_inputs_to_zero(0, &env->fp_status); + + result = 1 << (float32_compare_quiet(arg1, arg2, &env->fp_status) + 1); + result |= float32_is_denormal(arg1) << 4; + result |= float32_is_denormal(arg2) << 5; + + flags = f_get_excp_flags(env); + if (flags) { + f_update_psw_flags(env, flags); + } else { + env->FPU_FS = 0; + } + + set_flush_inputs_to_zero(1, &env->fp_status); + return result; +} + +uint32_t helper_ftoi(CPUTriCoreState *env, uint32_t arg) +{ + float32 f_arg = make_float32(arg); + int32_t result, flags; + + result = float32_to_int32(f_arg, &env->fp_status); + + flags = f_get_excp_flags(env); + if (flags) { + if (float32_is_any_nan(f_arg)) { + result = 0; + } + f_update_psw_flags(env, flags); + } else { + env->FPU_FS = 0; + } + return (uint32_t)result; +} + +uint32_t helper_itof(CPUTriCoreState *env, uint32_t arg) +{ + float32 f_result; + uint32_t flags; + f_result = int32_to_float32(arg, &env->fp_status); + + flags = f_get_excp_flags(env); + if (flags) { + f_update_psw_flags(env, flags); + } else { + env->FPU_FS = 0; + } + return (uint32_t)f_result; +} + +uint32_t helper_utof(CPUTriCoreState *env, uint32_t arg) +{ + float32 f_result; + uint32_t flags; + + f_result = uint32_to_float32(arg, &env->fp_status); + + flags = f_get_excp_flags(env); + if (flags) { + f_update_psw_flags(env, flags); + } else { + env->FPU_FS = 0; + } + return (uint32_t)f_result; +} + +uint32_t helper_ftoiz(CPUTriCoreState *env, 
uint32_t arg) +{ + float32 f_arg = make_float32(arg); + uint32_t result; + int32_t flags; + + result = float32_to_int32_round_to_zero(f_arg, &env->fp_status); + + flags = f_get_excp_flags(env); + if (flags & float_flag_invalid) { + flags &= ~float_flag_inexact; + if (float32_is_any_nan(f_arg)) { + result = 0; + } + } + + if (flags) { + f_update_psw_flags(env, flags); + } else { + env->FPU_FS = 0; + } + + return result; +} + +uint32_t helper_ftouz(CPUTriCoreState *env, uint32_t arg) +{ + float32 f_arg = make_float32(arg); + uint32_t result; + int32_t flags; + + result = float32_to_uint32_round_to_zero(f_arg, &env->fp_status); + + flags = f_get_excp_flags(env); + if (flags & float_flag_invalid) { + flags &= ~float_flag_inexact; + if (float32_is_any_nan(f_arg)) { + result = 0; + } + } else if (float32_lt_quiet(f_arg, 0, &env->fp_status)) { + flags = float_flag_invalid; + result = 0; + } + + if (flags) { + f_update_psw_flags(env, flags); + } else { + env->FPU_FS = 0; + } + return result; +} + +void helper_updfl(CPUTriCoreState *env, uint32_t arg) +{ + env->FPU_FS = extract32(arg, 7, 1) & extract32(arg, 15, 1); + env->FPU_FI = (extract32(arg, 6, 1) & extract32(arg, 14, 1)) << 31; + env->FPU_FV = (extract32(arg, 5, 1) & extract32(arg, 13, 1)) << 31; + env->FPU_FZ = (extract32(arg, 4, 1) & extract32(arg, 12, 1)) << 31; + env->FPU_FU = (extract32(arg, 3, 1) & extract32(arg, 11, 1)) << 31; + /* clear FX and RM */ + env->PSW &= ~(extract32(arg, 10, 1) << 26); + env->PSW |= (extract32(arg, 2, 1) & extract32(arg, 10, 1)) << 26; + + fpu_set_state(env); +} diff --git a/qemu/target/tricore/helper.c b/qemu/target/tricore/helper.c new file mode 100644 index 00000000..83ff0915 --- /dev/null +++ b/qemu/target/tricore/helper.c @@ -0,0 +1,162 @@ +/* + * Copyright (c) 2012-2014 Bastian Koppelmann C-Lab/University Paderborn + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the 
Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ + +/* + Modified for Unicorn Engine by Eric Poole , 2022 + Copyright 2022 Aptiv +*/ + +#include "qemu/osdep.h" + +#include "cpu.h" +#include "exec/exec-all.h" +#include "fpu/softfloat-helpers.h" + +enum { + TLBRET_DIRTY = -4, + TLBRET_INVALID = -3, + TLBRET_NOMATCH = -2, + TLBRET_BADADDR = -1, + TLBRET_MATCH = 0 +}; + +#if defined(CONFIG_SOFTMMU) +static int get_physical_address(CPUTriCoreState *env, hwaddr *physical, + int *prot, target_ulong address, + MMUAccessType access_type, int mmu_idx) +{ + int ret = TLBRET_MATCH; + + *physical = address & 0xFFFFFFFF; + *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; + + return ret; +} + +hwaddr tricore_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) +{ + TriCoreCPU *cpu = TRICORE_CPU(cs); + hwaddr phys_addr; + int prot; + int mmu_idx = cpu_mmu_index(&cpu->env, false); + + if (get_physical_address(&cpu->env, &phys_addr, &prot, addr, + MMU_DATA_LOAD, mmu_idx)) { + return -1; + } + return phys_addr; +} +#endif + +/* TODO: Add exeption support*/ +static void raise_mmu_exception(CPUTriCoreState *env, target_ulong address, + int rw, int tlb_error) +{ +} + +bool tricore_cpu_tlb_fill(CPUState *cs, vaddr address, int size, + MMUAccessType rw, int mmu_idx, + bool probe, uintptr_t retaddr) +{ + TriCoreCPU *cpu = TRICORE_CPU(cs); + CPUTriCoreState *env = &cpu->env; + hwaddr physical; + int prot; + int ret = 0; + + rw &= 1; + ret = get_physical_address(env, &physical, &prot, + address, rw, mmu_idx); + + // qemu_log_mask(CPU_LOG_MMU, "%s address=" 
TARGET_FMT_lx " ret %d physical " + // TARGET_FMT_plx " prot %d\n", + // __func__, (target_ulong)address, ret, physical, prot); + + if (ret == TLBRET_MATCH) { + tlb_set_page(cs, address & TARGET_PAGE_MASK, + physical & TARGET_PAGE_MASK, prot | PAGE_EXEC, + mmu_idx, TARGET_PAGE_SIZE); + return true; + } else { + assert(ret < 0); + if (probe) { + return false; + } + raise_mmu_exception(env, address, rw, ret); + cpu_loop_exit_restore(cs, retaddr); + } +} + +#if 0 +static void tricore_cpu_list_entry(gpointer data, gpointer user_data) +{ + ObjectClass *oc = data; + const char *typename; + char *name; + + typename = object_class_get_name(oc); + name = g_strndup(typename, strlen(typename) - strlen("-" TYPE_TRICORE_CPU)); + qemu_printf(" %s\n", name); + g_free(name); +} +#endif + +#if 0 +void tricore_cpu_list(void) +{ + GSList *list; + + list = object_class_get_list_sorted(TYPE_TRICORE_CPU, false); + qemu_printf("Available CPUs:\n"); + g_slist_foreach(list, tricore_cpu_list_entry, NULL); + g_slist_free(list); +} +#endif + +void fpu_set_state(CPUTriCoreState *env) +{ + set_float_rounding_mode(env->PSW & MASK_PSW_FPU_RM, &env->fp_status); + set_flush_inputs_to_zero(1, &env->fp_status); + set_flush_to_zero(1, &env->fp_status); + set_default_nan_mode(1, &env->fp_status); +} + +uint32_t psw_read(CPUTriCoreState *env) +{ + /* clear all USB bits */ + env->PSW &= 0x6ffffff; + /* now set them from the cache */ + env->PSW |= ((env->PSW_USB_C != 0) << 31); + env->PSW |= ((env->PSW_USB_V & (1 << 31)) >> 1); + env->PSW |= ((env->PSW_USB_SV & (1 << 31)) >> 2); + env->PSW |= ((env->PSW_USB_AV & (1 << 31)) >> 3); + env->PSW |= ((env->PSW_USB_SAV & (1 << 31)) >> 4); + + return env->PSW; +} + +void psw_write(CPUTriCoreState *env, uint32_t val) +{ + env->PSW_USB_C = (val & MASK_USB_C); + env->PSW_USB_V = (val & MASK_USB_V) << 1; + env->PSW_USB_SV = (val & MASK_USB_SV) << 2; + env->PSW_USB_AV = (val & MASK_USB_AV) << 3; + env->PSW_USB_SAV = (val & MASK_USB_SAV) << 4; + env->PSW = val; + + 
fpu_set_state(env); +} diff --git a/qemu/target/tricore/helper.h b/qemu/target/tricore/helper.h new file mode 100644 index 00000000..e1ee5fa0 --- /dev/null +++ b/qemu/target/tricore/helper.h @@ -0,0 +1,163 @@ +/* + * Copyright (c) 2012-2014 Bastian Koppelmann C-Lab/University Paderborn + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ + +/* + Modified for Unicorn Engine by Eric Poole , 2022 + Copyright 2022 Aptiv +*/ + +DEF_HELPER_4(uc_tracecode, void, i32, i32, ptr, i64) +DEF_HELPER_6(uc_traceopcode, void, ptr, i64, i64, i32, ptr, i64) + +/* Arithmetic */ +DEF_HELPER_3(add_ssov, i32, env, i32, i32) +DEF_HELPER_3(add64_ssov, i64, env, i64, i64) +DEF_HELPER_3(add_suov, i32, env, i32, i32) +DEF_HELPER_3(add_h_ssov, i32, env, i32, i32) +DEF_HELPER_3(add_h_suov, i32, env, i32, i32) +DEF_HELPER_4(addr_h_ssov, i32, env, i64, i32, i32) +DEF_HELPER_4(addsur_h_ssov, i32, env, i64, i32, i32) +DEF_HELPER_3(sub_ssov, i32, env, i32, i32) +DEF_HELPER_3(sub64_ssov, i64, env, i64, i64) +DEF_HELPER_3(sub_suov, i32, env, i32, i32) +DEF_HELPER_3(sub_h_ssov, i32, env, i32, i32) +DEF_HELPER_3(sub_h_suov, i32, env, i32, i32) +DEF_HELPER_4(subr_h_ssov, i32, env, i64, i32, i32) +DEF_HELPER_4(subadr_h_ssov, i32, env, i64, i32, i32) +DEF_HELPER_3(mul_ssov, i32, env, i32, i32) +DEF_HELPER_3(mul_suov, i32, env, i32, i32) +DEF_HELPER_3(sha_ssov, i32, env, i32, i32) +DEF_HELPER_3(absdif_ssov, i32, 
env, i32, i32) +DEF_HELPER_4(madd32_ssov, i32, env, i32, i32, i32) +DEF_HELPER_4(madd32_suov, i32, env, i32, i32, i32) +DEF_HELPER_4(madd64_ssov, i64, env, i32, i64, i32) +DEF_HELPER_5(madd64_q_ssov, i64, env, i64, i32, i32, i32) +DEF_HELPER_3(madd32_q_add_ssov, i32, env, i64, i64) +DEF_HELPER_5(maddr_q_ssov, i32, env, i32, i32, i32, i32) +DEF_HELPER_4(madd64_suov, i64, env, i32, i64, i32) +DEF_HELPER_4(msub32_ssov, i32, env, i32, i32, i32) +DEF_HELPER_4(msub32_suov, i32, env, i32, i32, i32) +DEF_HELPER_4(msub64_ssov, i64, env, i32, i64, i32) +DEF_HELPER_5(msub64_q_ssov, i64, env, i64, i32, i32, i32) +DEF_HELPER_3(msub32_q_sub_ssov, i32, env, i64, i64) +DEF_HELPER_5(msubr_q_ssov, i32, env, i32, i32, i32, i32) +DEF_HELPER_4(msub64_suov, i64, env, i32, i64, i32) +DEF_HELPER_3(absdif_h_ssov, i32, env, i32, i32) +DEF_HELPER_2(abs_ssov, i32, env, i32) +DEF_HELPER_2(abs_h_ssov, i32, env, i32) +/* hword/byte arithmetic */ +DEF_HELPER_2(abs_b, i32, env, i32) +DEF_HELPER_2(abs_h, i32, env, i32) +DEF_HELPER_3(absdif_b, i32, env, i32, i32) +DEF_HELPER_3(absdif_h, i32, env, i32, i32) +DEF_HELPER_4(addr_h, i32, env, i64, i32, i32) +DEF_HELPER_4(addsur_h, i32, env, i64, i32, i32) +DEF_HELPER_5(maddr_q, i32, env, i32, i32, i32, i32) +DEF_HELPER_3(add_b, i32, env, i32, i32) +DEF_HELPER_3(add_h, i32, env, i32, i32) +DEF_HELPER_3(sub_b, i32, env, i32, i32) +DEF_HELPER_3(sub_h, i32, env, i32, i32) +DEF_HELPER_4(subr_h, i32, env, i64, i32, i32) +DEF_HELPER_4(subadr_h, i32, env, i64, i32, i32) +DEF_HELPER_5(msubr_q, i32, env, i32, i32, i32, i32) +DEF_HELPER_FLAGS_2(eq_b, TCG_CALL_NO_RWG_SE, i32, i32, i32) +DEF_HELPER_FLAGS_2(eq_h, TCG_CALL_NO_RWG_SE, i32, i32, i32) +DEF_HELPER_FLAGS_2(eqany_b, TCG_CALL_NO_RWG_SE, i32, i32, i32) +DEF_HELPER_FLAGS_2(eqany_h, TCG_CALL_NO_RWG_SE, i32, i32, i32) +DEF_HELPER_FLAGS_2(lt_b, TCG_CALL_NO_RWG_SE, i32, i32, i32) +DEF_HELPER_FLAGS_2(lt_bu, TCG_CALL_NO_RWG_SE, i32, i32, i32) +DEF_HELPER_FLAGS_2(lt_h, TCG_CALL_NO_RWG_SE, i32, i32, i32) 
+DEF_HELPER_FLAGS_2(lt_hu, TCG_CALL_NO_RWG_SE, i32, i32, i32) +DEF_HELPER_FLAGS_2(max_b, TCG_CALL_NO_RWG_SE, i32, i32, i32) +DEF_HELPER_FLAGS_2(max_bu, TCG_CALL_NO_RWG_SE, i32, i32, i32) +DEF_HELPER_FLAGS_2(max_h, TCG_CALL_NO_RWG_SE, i32, i32, i32) +DEF_HELPER_FLAGS_2(max_hu, TCG_CALL_NO_RWG_SE, i32, i32, i32) +DEF_HELPER_FLAGS_2(ixmax, TCG_CALL_NO_RWG_SE, i64, i64, i32) +DEF_HELPER_FLAGS_2(ixmax_u, TCG_CALL_NO_RWG_SE, i64, i64, i32) +DEF_HELPER_FLAGS_2(min_b, TCG_CALL_NO_RWG_SE, i32, i32, i32) +DEF_HELPER_FLAGS_2(min_bu, TCG_CALL_NO_RWG_SE, i32, i32, i32) +DEF_HELPER_FLAGS_2(min_h, TCG_CALL_NO_RWG_SE, i32, i32, i32) +DEF_HELPER_FLAGS_2(min_hu, TCG_CALL_NO_RWG_SE, i32, i32, i32) +DEF_HELPER_FLAGS_2(ixmin, TCG_CALL_NO_RWG_SE, i64, i64, i32) +DEF_HELPER_FLAGS_2(ixmin_u, TCG_CALL_NO_RWG_SE, i64, i64, i32) +/* count leading ... */ +DEF_HELPER_FLAGS_1(clo_h, TCG_CALL_NO_RWG_SE, i32, i32) +DEF_HELPER_FLAGS_1(clz_h, TCG_CALL_NO_RWG_SE, i32, i32) +DEF_HELPER_FLAGS_1(cls_h, TCG_CALL_NO_RWG_SE, i32, i32) +/* sh */ +DEF_HELPER_FLAGS_2(sh, TCG_CALL_NO_RWG_SE, i32, i32, i32) +DEF_HELPER_FLAGS_2(sh_h, TCG_CALL_NO_RWG_SE, i32, i32, i32) +DEF_HELPER_3(sha, i32, env, i32, i32) +DEF_HELPER_2(sha_h, i32, i32, i32) +/* merge/split/parity */ +DEF_HELPER_FLAGS_2(bmerge, TCG_CALL_NO_RWG_SE, i32, i32, i32) +DEF_HELPER_FLAGS_1(bsplit, TCG_CALL_NO_RWG_SE, i64, i32) +DEF_HELPER_FLAGS_1(parity, TCG_CALL_NO_RWG_SE, i32, i32) +/* float */ +DEF_HELPER_FLAGS_4(pack, TCG_CALL_NO_RWG_SE, i32, i32, i32, i32, i32) +DEF_HELPER_1(unpack, i64, i32) +DEF_HELPER_3(fadd, i32, env, i32, i32) +DEF_HELPER_3(fsub, i32, env, i32, i32) +DEF_HELPER_3(fmul, i32, env, i32, i32) +DEF_HELPER_3(fdiv, i32, env, i32, i32) +DEF_HELPER_4(fmadd, i32, env, i32, i32, i32) +DEF_HELPER_4(fmsub, i32, env, i32, i32, i32) +DEF_HELPER_3(fcmp, i32, env, i32, i32) +DEF_HELPER_2(qseed, i32, env, i32) +DEF_HELPER_2(ftoi, i32, env, i32) +DEF_HELPER_2(itof, i32, env, i32) +DEF_HELPER_2(utof, i32, env, i32) +DEF_HELPER_2(ftoiz, i32, env, 
i32) +DEF_HELPER_2(ftouz, i32, env, i32) +DEF_HELPER_2(updfl, void, env, i32) +/* dvinit */ +DEF_HELPER_3(dvinit_b_13, i64, env, i32, i32) +DEF_HELPER_3(dvinit_b_131, i64, env, i32, i32) +DEF_HELPER_3(dvinit_h_13, i64, env, i32, i32) +DEF_HELPER_3(dvinit_h_131, i64, env, i32, i32) +DEF_HELPER_FLAGS_2(dvadj, TCG_CALL_NO_RWG_SE, i64, i64, i32) +DEF_HELPER_FLAGS_2(dvstep, TCG_CALL_NO_RWG_SE, i64, i64, i32) +DEF_HELPER_FLAGS_2(dvstep_u, TCG_CALL_NO_RWG_SE, i64, i64, i32) +DEF_HELPER_3(divide, i64, env, i32, i32) +DEF_HELPER_3(divide_u, i64, env, i32, i32) +/* mulh */ +DEF_HELPER_FLAGS_5(mul_h, TCG_CALL_NO_RWG_SE, i64, i32, i32, i32, i32, i32) +DEF_HELPER_FLAGS_5(mulm_h, TCG_CALL_NO_RWG_SE, i64, i32, i32, i32, i32, i32) +DEF_HELPER_FLAGS_5(mulr_h, TCG_CALL_NO_RWG_SE, i32, i32, i32, i32, i32, i32) +/* crc32 */ +DEF_HELPER_FLAGS_2(crc32, TCG_CALL_NO_RWG_SE, i32, i32, i32) +/* CSA */ +DEF_HELPER_2(call, void, env, i32) +DEF_HELPER_1(ret, void, env) +DEF_HELPER_2(bisr, void, env, i32) +DEF_HELPER_1(rfe, void, env) +DEF_HELPER_1(rfm, void, env) +DEF_HELPER_2(ldlcx, void, env, i32) +DEF_HELPER_2(lducx, void, env, i32) +DEF_HELPER_2(stlcx, void, env, i32) +DEF_HELPER_2(stucx, void, env, i32) +DEF_HELPER_1(svlcx, void, env) +DEF_HELPER_1(svucx, void, env) +DEF_HELPER_1(rslcx, void, env) +/* Address mode helper */ +DEF_HELPER_1(br_update, i32, i32) +DEF_HELPER_2(circ_update, i32, i32, i32) +/* PSW cache helper */ +DEF_HELPER_2(psw_write, void, env, i32) +DEF_HELPER_1(psw_read, i32, env) +/* Exceptions */ +DEF_HELPER_3(raise_exception_sync, noreturn, env, i32, i32) diff --git a/qemu/target/tricore/op_helper.c b/qemu/target/tricore/op_helper.c new file mode 100644 index 00000000..57ad816f --- /dev/null +++ b/qemu/target/tricore/op_helper.c @@ -0,0 +1,2795 @@ +/* + * Copyright (c) 2012-2014 Bastian Koppelmann C-Lab/University Paderborn + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as 
published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ +#include "qemu/osdep.h" +#include "cpu.h" +#include "qemu/host-utils.h" +#include "exec/helper-proto.h" +#include "exec/exec-all.h" +#include "exec/cpu_ldst.h" +#include "qemu/crc32c.h" + + +/* Exception helpers */ + +static void QEMU_NORETURN +raise_exception_sync_internal(CPUTriCoreState *env, uint32_t class, int tin, + uintptr_t pc, uint32_t fcd_pc) +{ + CPUState *cs = env_cpu(env); + /* in case we come from a helper-call we need to restore the PC */ + cpu_restore_state(cs, pc, true); + + /* Tin is loaded into d[15] */ + env->gpr_d[15] = tin; + + if (class == TRAPC_CTX_MNG && tin == TIN3_FCU) { + /* upper context cannot be saved, if the context list is empty */ + } else { + helper_svucx(env); + } + + /* The return address in a[11] is updated */ + if (class == TRAPC_CTX_MNG && tin == TIN3_FCD) { + env->SYSCON |= MASK_SYSCON_FCD_SF; + /* when we run out of CSAs after saving a context a FCD trap is taken + and the return address is the start of the trap handler which used + the last CSA */ + env->gpr_a[11] = fcd_pc; + } else if (class == TRAPC_SYSCALL) { + env->gpr_a[11] = env->PC + 4; + } else { + env->gpr_a[11] = env->PC; + } + /* The stack pointer in A[10] is set to the Interrupt Stack Pointer (ISP) + when the processor was not previously using the interrupt stack + (in case of PSW.IS = 0). The stack pointer bit is set for using the + interrupt stack: PSW.IS = 1. 
*/ + if ((env->PSW & MASK_PSW_IS) == 0) { + env->gpr_a[10] = env->ISP; + } + env->PSW |= MASK_PSW_IS; + /* The I/O mode is set to Supervisor mode, which means all permissions + are enabled: PSW.IO = 10 B .*/ + env->PSW |= (2 << 10); + + /*The current Protection Register Set is set to 0: PSW.PRS = 00 B .*/ + env->PSW &= ~MASK_PSW_PRS; + + /* The Call Depth Counter (CDC) is cleared, and the call depth limit is + set for 64: PSW.CDC = 0000000 B .*/ + env->PSW &= ~MASK_PSW_CDC; + + /* Call Depth Counter is enabled, PSW.CDE = 1. */ + env->PSW |= MASK_PSW_CDE; + + /* Write permission to global registers A[0], A[1], A[8], A[9] is + disabled: PSW.GW = 0. */ + env->PSW &= ~MASK_PSW_GW; + + /*The interrupt system is globally disabled: ICR.IE = 0. The ‘old’ + ICR.IE and ICR.CCPN are saved */ + + /* PCXI.PIE = ICR.IE */ + env->PCXI = ((env->PCXI & ~MASK_PCXI_PIE_1_3) + + ((env->ICR & MASK_ICR_IE_1_3) << 15)); + /* PCXI.PCPN = ICR.CCPN */ + env->PCXI = (env->PCXI & 0xffffff) + + ((env->ICR & MASK_ICR_CCPN) << 24); + /* Update PC using the trap vector table */ + env->PC = env->BTV | (class << 5); + + cpu_loop_exit(cs); +} + +void helper_raise_exception_sync(CPUTriCoreState *env, uint32_t class, + uint32_t tin) +{ + raise_exception_sync_internal(env, class, tin, 0, 0); +} + +static void raise_exception_sync_helper(CPUTriCoreState *env, uint32_t class, + uint32_t tin, uintptr_t pc) +{ + raise_exception_sync_internal(env, class, tin, pc, 0); +} + +/* Addressing mode helper */ + +static uint16_t reverse16(uint16_t val) +{ + uint8_t high = (uint8_t)(val >> 8); + uint8_t low = (uint8_t)(val & 0xff); + + uint16_t rh, rl; + + rl = (uint16_t)((high * 0x0202020202ULL & 0x010884422010ULL) % 1023); + rh = (uint16_t)((low * 0x0202020202ULL & 0x010884422010ULL) % 1023); + + return (rh << 8) | rl; +} + +uint32_t helper_br_update(uint32_t reg) +{ + uint32_t index = reg & 0xffff; + uint32_t incr = reg >> 16; + uint32_t new_index = reverse16(reverse16(index) + reverse16(incr)); + return reg - 
index + new_index; +} + +uint32_t helper_circ_update(uint32_t reg, uint32_t off) +{ + uint32_t index = reg & 0xffff; + uint32_t length = reg >> 16; + int32_t new_index = index + off; + if (new_index < 0) { + new_index += length; + } else { + new_index %= length; + } + return reg - index + new_index; +} + +static uint32_t ssov32(CPUTriCoreState *env, int64_t arg) +{ + uint32_t ret; + int64_t max_pos = INT32_MAX; + int64_t max_neg = INT32_MIN; + if (arg > max_pos) { + env->PSW_USB_V = (1 << 31); + env->PSW_USB_SV = (1 << 31); + ret = (target_ulong)max_pos; + } else { + if (arg < max_neg) { + env->PSW_USB_V = (1 << 31); + env->PSW_USB_SV = (1 << 31); + ret = (target_ulong)max_neg; + } else { + env->PSW_USB_V = 0; + ret = (target_ulong)arg; + } + } + env->PSW_USB_AV = arg ^ arg * 2u; + env->PSW_USB_SAV |= env->PSW_USB_AV; + return ret; +} + +static uint32_t suov32_pos(CPUTriCoreState *env, uint64_t arg) +{ + uint32_t ret; + uint64_t max_pos = UINT32_MAX; + if (arg > max_pos) { + env->PSW_USB_V = (1 << 31); + env->PSW_USB_SV = (1 << 31); + ret = (target_ulong)max_pos; + } else { + env->PSW_USB_V = 0; + ret = (target_ulong)arg; + } + env->PSW_USB_AV = arg ^ arg * 2u; + env->PSW_USB_SAV |= env->PSW_USB_AV; + return ret; +} + +static uint32_t suov32_neg(CPUTriCoreState *env, int64_t arg) +{ + uint32_t ret; + + if (arg < 0) { + env->PSW_USB_V = (1 << 31); + env->PSW_USB_SV = (1 << 31); + ret = 0; + } else { + env->PSW_USB_V = 0; + ret = (target_ulong)arg; + } + env->PSW_USB_AV = arg ^ arg * 2u; + env->PSW_USB_SAV |= env->PSW_USB_AV; + return ret; +} + +static uint32_t ssov16(CPUTriCoreState *env, int32_t hw0, int32_t hw1) +{ + int32_t max_pos = INT16_MAX; + int32_t max_neg = INT16_MIN; + int32_t av0, av1; + + env->PSW_USB_V = 0; + av0 = hw0 ^ hw0 * 2u; + if (hw0 > max_pos) { + env->PSW_USB_V = (1 << 31); + hw0 = max_pos; + } else if (hw0 < max_neg) { + env->PSW_USB_V = (1 << 31); + hw0 = max_neg; + } + + av1 = hw1 ^ hw1 * 2u; + if (hw1 > max_pos) { + env->PSW_USB_V = (1 << 
31); + hw1 = max_pos; + } else if (hw1 < max_neg) { + env->PSW_USB_V = (1 << 31); + hw1 = max_neg; + } + + env->PSW_USB_SV |= env->PSW_USB_V; + env->PSW_USB_AV = (av0 | av1) << 16; + env->PSW_USB_SAV |= env->PSW_USB_AV; + return (hw0 & 0xffff) | (hw1 << 16); +} + +static uint32_t suov16(CPUTriCoreState *env, int32_t hw0, int32_t hw1) +{ + int32_t max_pos = UINT16_MAX; + int32_t av0, av1; + + env->PSW_USB_V = 0; + av0 = hw0 ^ hw0 * 2u; + if (hw0 > max_pos) { + env->PSW_USB_V = (1 << 31); + hw0 = max_pos; + } else if (hw0 < 0) { + env->PSW_USB_V = (1 << 31); + hw0 = 0; + } + + av1 = hw1 ^ hw1 * 2u; + if (hw1 > max_pos) { + env->PSW_USB_V = (1 << 31); + hw1 = max_pos; + } else if (hw1 < 0) { + env->PSW_USB_V = (1 << 31); + hw1 = 0; + } + + env->PSW_USB_SV |= env->PSW_USB_V; + env->PSW_USB_AV = (av0 | av1) << 16; + env->PSW_USB_SAV |= env->PSW_USB_AV; + return (hw0 & 0xffff) | (hw1 << 16); +} + +target_ulong helper_add_ssov(CPUTriCoreState *env, target_ulong r1, + target_ulong r2) +{ + int64_t t1 = sextract64(r1, 0, 32); + int64_t t2 = sextract64(r2, 0, 32); + int64_t result = t1 + t2; + return ssov32(env, result); +} + +uint64_t helper_add64_ssov(CPUTriCoreState *env, uint64_t r1, uint64_t r2) +{ + uint64_t result; + int64_t ovf; + + result = r1 + r2; + ovf = (result ^ r1) & ~(r1 ^ r2); + env->PSW_USB_AV = (result ^ result * 2u) >> 32; + env->PSW_USB_SAV |= env->PSW_USB_AV; + if (ovf < 0) { + env->PSW_USB_V = (1 << 31); + env->PSW_USB_SV = (1 << 31); + /* ext_ret > MAX_INT */ + if ((int64_t)r1 >= 0) { + result = INT64_MAX; + /* ext_ret < MIN_INT */ + } else { + result = INT64_MIN; + } + } else { + env->PSW_USB_V = 0; + } + return result; +} + +target_ulong helper_add_h_ssov(CPUTriCoreState *env, target_ulong r1, + target_ulong r2) +{ + int32_t ret_hw0, ret_hw1; + + ret_hw0 = sextract32(r1, 0, 16) + sextract32(r2, 0, 16); + ret_hw1 = sextract32(r1, 16, 16) + sextract32(r2, 16, 16); + return ssov16(env, ret_hw0, ret_hw1); +} + +uint32_t 
helper_addr_h_ssov(CPUTriCoreState *env, uint64_t r1, uint32_t r2_l, + uint32_t r2_h) +{ + int64_t mul_res0 = sextract64(r1, 0, 32); + int64_t mul_res1 = sextract64(r1, 32, 32); + int64_t r2_low = sextract64(r2_l, 0, 32); + int64_t r2_high = sextract64(r2_h, 0, 32); + int64_t result0, result1; + uint32_t ovf0, ovf1; + uint32_t avf0, avf1; + + ovf0 = ovf1 = 0; + + result0 = r2_low + mul_res0 + 0x8000; + result1 = r2_high + mul_res1 + 0x8000; + + avf0 = result0 * 2u; + avf0 = result0 ^ avf0; + avf1 = result1 * 2u; + avf1 = result1 ^ avf1; + + if (result0 > INT32_MAX) { + ovf0 = (1 << 31); + result0 = INT32_MAX; + } else if (result0 < INT32_MIN) { + ovf0 = (1 << 31); + result0 = INT32_MIN; + } + + if (result1 > INT32_MAX) { + ovf1 = (1 << 31); + result1 = INT32_MAX; + } else if (result1 < INT32_MIN) { + ovf1 = (1 << 31); + result1 = INT32_MIN; + } + + env->PSW_USB_V = ovf0 | ovf1; + env->PSW_USB_SV |= env->PSW_USB_V; + + env->PSW_USB_AV = avf0 | avf1; + env->PSW_USB_SAV |= env->PSW_USB_AV; + + return (result1 & 0xffff0000ULL) | ((result0 >> 16) & 0xffffULL); +} + +uint32_t helper_addsur_h_ssov(CPUTriCoreState *env, uint64_t r1, uint32_t r2_l, + uint32_t r2_h) +{ + int64_t mul_res0 = sextract64(r1, 0, 32); + int64_t mul_res1 = sextract64(r1, 32, 32); + int64_t r2_low = sextract64(r2_l, 0, 32); + int64_t r2_high = sextract64(r2_h, 0, 32); + int64_t result0, result1; + uint32_t ovf0, ovf1; + uint32_t avf0, avf1; + + ovf0 = ovf1 = 0; + + result0 = r2_low - mul_res0 + 0x8000; + result1 = r2_high + mul_res1 + 0x8000; + + avf0 = result0 * 2u; + avf0 = result0 ^ avf0; + avf1 = result1 * 2u; + avf1 = result1 ^ avf1; + + if (result0 > INT32_MAX) { + ovf0 = (1 << 31); + result0 = INT32_MAX; + } else if (result0 < INT32_MIN) { + ovf0 = (1 << 31); + result0 = INT32_MIN; + } + + if (result1 > INT32_MAX) { + ovf1 = (1 << 31); + result1 = INT32_MAX; + } else if (result1 < INT32_MIN) { + ovf1 = (1 << 31); + result1 = INT32_MIN; + } + + env->PSW_USB_V = ovf0 | ovf1; + env->PSW_USB_SV 
|= env->PSW_USB_V; + + env->PSW_USB_AV = avf0 | avf1; + env->PSW_USB_SAV |= env->PSW_USB_AV; + + return (result1 & 0xffff0000ULL) | ((result0 >> 16) & 0xffffULL); +} + + +target_ulong helper_add_suov(CPUTriCoreState *env, target_ulong r1, + target_ulong r2) +{ + int64_t t1 = extract64(r1, 0, 32); + int64_t t2 = extract64(r2, 0, 32); + int64_t result = t1 + t2; + return suov32_pos(env, result); +} + +target_ulong helper_add_h_suov(CPUTriCoreState *env, target_ulong r1, + target_ulong r2) +{ + int32_t ret_hw0, ret_hw1; + + ret_hw0 = extract32(r1, 0, 16) + extract32(r2, 0, 16); + ret_hw1 = extract32(r1, 16, 16) + extract32(r2, 16, 16); + return suov16(env, ret_hw0, ret_hw1); +} + +target_ulong helper_sub_ssov(CPUTriCoreState *env, target_ulong r1, + target_ulong r2) +{ + int64_t t1 = sextract64(r1, 0, 32); + int64_t t2 = sextract64(r2, 0, 32); + int64_t result = t1 - t2; + return ssov32(env, result); +} + +uint64_t helper_sub64_ssov(CPUTriCoreState *env, uint64_t r1, uint64_t r2) +{ + uint64_t result; + int64_t ovf; + + result = r1 - r2; + ovf = (result ^ r1) & (r1 ^ r2); + env->PSW_USB_AV = (result ^ result * 2u) >> 32; + env->PSW_USB_SAV |= env->PSW_USB_AV; + if (ovf < 0) { + env->PSW_USB_V = (1 << 31); + env->PSW_USB_SV = (1 << 31); + /* ext_ret > MAX_INT */ + if ((int64_t)r1 >= 0) { + result = INT64_MAX; + /* ext_ret < MIN_INT */ + } else { + result = INT64_MIN; + } + } else { + env->PSW_USB_V = 0; + } + return result; +} + +target_ulong helper_sub_h_ssov(CPUTriCoreState *env, target_ulong r1, + target_ulong r2) +{ + int32_t ret_hw0, ret_hw1; + + ret_hw0 = sextract32(r1, 0, 16) - sextract32(r2, 0, 16); + ret_hw1 = sextract32(r1, 16, 16) - sextract32(r2, 16, 16); + return ssov16(env, ret_hw0, ret_hw1); +} + +uint32_t helper_subr_h_ssov(CPUTriCoreState *env, uint64_t r1, uint32_t r2_l, + uint32_t r2_h) +{ + int64_t mul_res0 = sextract64(r1, 0, 32); + int64_t mul_res1 = sextract64(r1, 32, 32); + int64_t r2_low = sextract64(r2_l, 0, 32); + int64_t r2_high = 
sextract64(r2_h, 0, 32); + int64_t result0, result1; + uint32_t ovf0, ovf1; + uint32_t avf0, avf1; + + ovf0 = ovf1 = 0; + + result0 = r2_low - mul_res0 + 0x8000; + result1 = r2_high - mul_res1 + 0x8000; + + avf0 = result0 * 2u; + avf0 = result0 ^ avf0; + avf1 = result1 * 2u; + avf1 = result1 ^ avf1; + + if (result0 > INT32_MAX) { + ovf0 = (1 << 31); + result0 = INT32_MAX; + } else if (result0 < INT32_MIN) { + ovf0 = (1 << 31); + result0 = INT32_MIN; + } + + if (result1 > INT32_MAX) { + ovf1 = (1 << 31); + result1 = INT32_MAX; + } else if (result1 < INT32_MIN) { + ovf1 = (1 << 31); + result1 = INT32_MIN; + } + + env->PSW_USB_V = ovf0 | ovf1; + env->PSW_USB_SV |= env->PSW_USB_V; + + env->PSW_USB_AV = avf0 | avf1; + env->PSW_USB_SAV |= env->PSW_USB_AV; + + return (result1 & 0xffff0000ULL) | ((result0 >> 16) & 0xffffULL); +} + +uint32_t helper_subadr_h_ssov(CPUTriCoreState *env, uint64_t r1, uint32_t r2_l, + uint32_t r2_h) +{ + int64_t mul_res0 = sextract64(r1, 0, 32); + int64_t mul_res1 = sextract64(r1, 32, 32); + int64_t r2_low = sextract64(r2_l, 0, 32); + int64_t r2_high = sextract64(r2_h, 0, 32); + int64_t result0, result1; + uint32_t ovf0, ovf1; + uint32_t avf0, avf1; + + ovf0 = ovf1 = 0; + + result0 = r2_low + mul_res0 + 0x8000; + result1 = r2_high - mul_res1 + 0x8000; + + avf0 = result0 * 2u; + avf0 = result0 ^ avf0; + avf1 = result1 * 2u; + avf1 = result1 ^ avf1; + + if (result0 > INT32_MAX) { + ovf0 = (1 << 31); + result0 = INT32_MAX; + } else if (result0 < INT32_MIN) { + ovf0 = (1 << 31); + result0 = INT32_MIN; + } + + if (result1 > INT32_MAX) { + ovf1 = (1 << 31); + result1 = INT32_MAX; + } else if (result1 < INT32_MIN) { + ovf1 = (1 << 31); + result1 = INT32_MIN; + } + + env->PSW_USB_V = ovf0 | ovf1; + env->PSW_USB_SV |= env->PSW_USB_V; + + env->PSW_USB_AV = avf0 | avf1; + env->PSW_USB_SAV |= env->PSW_USB_AV; + + return (result1 & 0xffff0000ULL) | ((result0 >> 16) & 0xffffULL); +} + +target_ulong helper_sub_suov(CPUTriCoreState *env, target_ulong r1, + 
target_ulong r2) +{ + int64_t t1 = extract64(r1, 0, 32); + int64_t t2 = extract64(r2, 0, 32); + int64_t result = t1 - t2; + return suov32_neg(env, result); +} + +target_ulong helper_sub_h_suov(CPUTriCoreState *env, target_ulong r1, + target_ulong r2) +{ + int32_t ret_hw0, ret_hw1; + + ret_hw0 = extract32(r1, 0, 16) - extract32(r2, 0, 16); + ret_hw1 = extract32(r1, 16, 16) - extract32(r2, 16, 16); + return suov16(env, ret_hw0, ret_hw1); +} + +target_ulong helper_mul_ssov(CPUTriCoreState *env, target_ulong r1, + target_ulong r2) +{ + int64_t t1 = sextract64(r1, 0, 32); + int64_t t2 = sextract64(r2, 0, 32); + int64_t result = t1 * t2; + return ssov32(env, result); +} + +target_ulong helper_mul_suov(CPUTriCoreState *env, target_ulong r1, + target_ulong r2) +{ + int64_t t1 = extract64(r1, 0, 32); + int64_t t2 = extract64(r2, 0, 32); + int64_t result = t1 * t2; + + return suov32_pos(env, result); +} + +target_ulong helper_sha_ssov(CPUTriCoreState *env, target_ulong r1, + target_ulong r2) +{ + int64_t t1 = sextract64(r1, 0, 32); + int32_t t2 = sextract64(r2, 0, 6); + int64_t result; + if (t2 == 0) { + result = t1; + } else if (t2 > 0) { + result = t1 << t2; + } else { + result = t1 >> -t2; + } + return ssov32(env, result); +} + +uint32_t helper_abs_ssov(CPUTriCoreState *env, target_ulong r1) +{ + target_ulong result; + result = ((int32_t)r1 >= 0) ? r1 : (0 - r1); + return ssov32(env, result); +} + +uint32_t helper_abs_h_ssov(CPUTriCoreState *env, target_ulong r1) +{ + int32_t ret_h0, ret_h1; + + ret_h0 = sextract32(r1, 0, 16); + ret_h0 = (ret_h0 >= 0) ? ret_h0 : (0 - ret_h0); + + ret_h1 = sextract32(r1, 16, 16); + ret_h1 = (ret_h1 >= 0) ? 
ret_h1 : (0 - ret_h1); + + return ssov16(env, ret_h0, ret_h1); +} + +target_ulong helper_absdif_ssov(CPUTriCoreState *env, target_ulong r1, + target_ulong r2) +{ + int64_t t1 = sextract64(r1, 0, 32); + int64_t t2 = sextract64(r2, 0, 32); + int64_t result; + + if (t1 > t2) { + result = t1 - t2; + } else { + result = t2 - t1; + } + return ssov32(env, result); +} + +uint32_t helper_absdif_h_ssov(CPUTriCoreState *env, target_ulong r1, + target_ulong r2) +{ + int32_t t1, t2; + int32_t ret_h0, ret_h1; + + t1 = sextract32(r1, 0, 16); + t2 = sextract32(r2, 0, 16); + if (t1 > t2) { + ret_h0 = t1 - t2; + } else { + ret_h0 = t2 - t1; + } + + t1 = sextract32(r1, 16, 16); + t2 = sextract32(r2, 16, 16); + if (t1 > t2) { + ret_h1 = t1 - t2; + } else { + ret_h1 = t2 - t1; + } + + return ssov16(env, ret_h0, ret_h1); +} + +target_ulong helper_madd32_ssov(CPUTriCoreState *env, target_ulong r1, + target_ulong r2, target_ulong r3) +{ + int64_t t1 = sextract64(r1, 0, 32); + int64_t t2 = sextract64(r2, 0, 32); + int64_t t3 = sextract64(r3, 0, 32); + int64_t result; + + result = t2 + (t1 * t3); + return ssov32(env, result); +} + +target_ulong helper_madd32_suov(CPUTriCoreState *env, target_ulong r1, + target_ulong r2, target_ulong r3) +{ + uint64_t t1 = extract64(r1, 0, 32); + uint64_t t2 = extract64(r2, 0, 32); + uint64_t t3 = extract64(r3, 0, 32); + int64_t result; + + result = t2 + (t1 * t3); + return suov32_pos(env, result); +} + +uint64_t helper_madd64_ssov(CPUTriCoreState *env, target_ulong r1, + uint64_t r2, target_ulong r3) +{ + uint64_t ret, ovf; + int64_t t1 = sextract64(r1, 0, 32); + int64_t t3 = sextract64(r3, 0, 32); + int64_t mul; + + mul = t1 * t3; + ret = mul + r2; + ovf = (ret ^ mul) & ~(mul ^ r2); + + t1 = ret >> 32; + env->PSW_USB_AV = t1 ^ t1 * 2u; + env->PSW_USB_SAV |= env->PSW_USB_AV; + + if ((int64_t)ovf < 0) { + env->PSW_USB_V = (1 << 31); + env->PSW_USB_SV = (1 << 31); + /* ext_ret > MAX_INT */ + if (mul >= 0) { + ret = INT64_MAX; + /* ext_ret < MIN_INT */ + } 
else { + ret = INT64_MIN; + } + } else { + env->PSW_USB_V = 0; + } + + return ret; +} + +uint32_t +helper_madd32_q_add_ssov(CPUTriCoreState *env, uint64_t r1, uint64_t r2) +{ + int64_t result; + + result = (r1 + r2); + + env->PSW_USB_AV = (result ^ result * 2u); + env->PSW_USB_SAV |= env->PSW_USB_AV; + + /* we do the saturation by hand, since we produce an overflow on the host + if the mul before was (0x80000000 * 0x80000000) << 1). If this is the + case, we flip the saturated value. */ + if (r2 == 0x8000000000000000LL) { + if (result > 0x7fffffffLL) { + env->PSW_USB_V = (1 << 31); + env->PSW_USB_SV = (1 << 31); + result = INT32_MIN; + } else if (result < -0x80000000LL) { + env->PSW_USB_V = (1 << 31); + env->PSW_USB_SV = (1 << 31); + result = INT32_MAX; + } else { + env->PSW_USB_V = 0; + } + } else { + if (result > 0x7fffffffLL) { + env->PSW_USB_V = (1 << 31); + env->PSW_USB_SV = (1 << 31); + result = INT32_MAX; + } else if (result < -0x80000000LL) { + env->PSW_USB_V = (1 << 31); + env->PSW_USB_SV = (1 << 31); + result = INT32_MIN; + } else { + env->PSW_USB_V = 0; + } + } + return (uint32_t)result; +} + +uint64_t helper_madd64_q_ssov(CPUTriCoreState *env, uint64_t r1, uint32_t r2, + uint32_t r3, uint32_t n) +{ + int64_t t1 = (int64_t)r1; + int64_t t2 = sextract64(r2, 0, 32); + int64_t t3 = sextract64(r3, 0, 32); + int64_t result, mul; + int64_t ovf; + + mul = (t2 * t3) << n; + result = mul + t1; + + env->PSW_USB_AV = (result ^ result * 2u) >> 32; + env->PSW_USB_SAV |= env->PSW_USB_AV; + + ovf = (result ^ mul) & ~(mul ^ t1); + /* we do the saturation by hand, since we produce an overflow on the host + if the mul was (0x80000000 * 0x80000000) << 1). If this is the + case, we flip the saturated value. 
*/ + if ((r2 == 0x80000000) && (r3 == 0x80000000) && (n == 1)) { + if (ovf >= 0) { + env->PSW_USB_V = (1 << 31); + env->PSW_USB_SV = (1 << 31); + /* ext_ret > MAX_INT */ + if (mul < 0) { + result = INT64_MAX; + /* ext_ret < MIN_INT */ + } else { + result = INT64_MIN; + } + } else { + env->PSW_USB_V = 0; + } + } else { + if (ovf < 0) { + env->PSW_USB_V = (1 << 31); + env->PSW_USB_SV = (1 << 31); + /* ext_ret > MAX_INT */ + if (mul >= 0) { + result = INT64_MAX; + /* ext_ret < MIN_INT */ + } else { + result = INT64_MIN; + } + } else { + env->PSW_USB_V = 0; + } + } + return (uint64_t)result; +} + +uint32_t helper_maddr_q_ssov(CPUTriCoreState *env, uint32_t r1, uint32_t r2, + uint32_t r3, uint32_t n) +{ + int64_t t1 = sextract64(r1, 0, 32); + int64_t t2 = sextract64(r2, 0, 32); + int64_t t3 = sextract64(r3, 0, 32); + int64_t mul, ret; + + if ((t2 == -0x8000ll) && (t3 == -0x8000ll) && (n == 1)) { + mul = 0x7fffffff; + } else { + mul = (t2 * t3) << n; + } + + ret = t1 + mul + 0x8000; + + env->PSW_USB_AV = ret ^ ret * 2u; + env->PSW_USB_SAV |= env->PSW_USB_AV; + + if (ret > 0x7fffffffll) { + env->PSW_USB_V = (1 << 31); + env->PSW_USB_SV |= env->PSW_USB_V; + ret = INT32_MAX; + } else if (ret < -0x80000000ll) { + env->PSW_USB_V = (1 << 31); + env->PSW_USB_SV |= env->PSW_USB_V; + ret = INT32_MIN; + } else { + env->PSW_USB_V = 0; + } + return ret & 0xffff0000ll; +} + +uint64_t helper_madd64_suov(CPUTriCoreState *env, target_ulong r1, + uint64_t r2, target_ulong r3) +{ + uint64_t ret, mul; + uint64_t t1 = extract64(r1, 0, 32); + uint64_t t3 = extract64(r3, 0, 32); + + mul = t1 * t3; + ret = mul + r2; + + t1 = ret >> 32; + env->PSW_USB_AV = t1 ^ t1 * 2u; + env->PSW_USB_SAV |= env->PSW_USB_AV; + + if (ret < r2) { + env->PSW_USB_V = (1 << 31); + env->PSW_USB_SV = (1 << 31); + /* saturate */ + ret = UINT64_MAX; + } else { + env->PSW_USB_V = 0; + } + return ret; +} + +target_ulong helper_msub32_ssov(CPUTriCoreState *env, target_ulong r1, + target_ulong r2, target_ulong r3) +{ + 
int64_t t1 = sextract64(r1, 0, 32); + int64_t t2 = sextract64(r2, 0, 32); + int64_t t3 = sextract64(r3, 0, 32); + int64_t result; + + result = t2 - (t1 * t3); + return ssov32(env, result); +} + +target_ulong helper_msub32_suov(CPUTriCoreState *env, target_ulong r1, + target_ulong r2, target_ulong r3) +{ + uint64_t t1 = extract64(r1, 0, 32); + uint64_t t2 = extract64(r2, 0, 32); + uint64_t t3 = extract64(r3, 0, 32); + uint64_t result; + uint64_t mul; + + mul = (t1 * t3); + result = t2 - mul; + + env->PSW_USB_AV = result ^ result * 2u; + env->PSW_USB_SAV |= env->PSW_USB_AV; + /* we calculate ovf by hand here, because the multiplication can overflow on + the host, which would give false results if we compare to less than + zero */ + if (mul > t2) { + env->PSW_USB_V = (1 << 31); + env->PSW_USB_SV = (1 << 31); + result = 0; + } else { + env->PSW_USB_V = 0; + } + return result; +} + +uint64_t helper_msub64_ssov(CPUTriCoreState *env, target_ulong r1, + uint64_t r2, target_ulong r3) +{ + uint64_t ret, ovf; + int64_t t1 = sextract64(r1, 0, 32); + int64_t t3 = sextract64(r3, 0, 32); + int64_t mul; + + mul = t1 * t3; + ret = r2 - mul; + ovf = (ret ^ r2) & (mul ^ r2); + + t1 = ret >> 32; + env->PSW_USB_AV = t1 ^ t1 * 2u; + env->PSW_USB_SAV |= env->PSW_USB_AV; + + if ((int64_t)ovf < 0) { + env->PSW_USB_V = (1 << 31); + env->PSW_USB_SV = (1 << 31); + /* ext_ret > MAX_INT */ + if (mul < 0) { + ret = INT64_MAX; + /* ext_ret < MIN_INT */ + } else { + ret = INT64_MIN; + } + } else { + env->PSW_USB_V = 0; + } + return ret; +} + +uint64_t helper_msub64_suov(CPUTriCoreState *env, target_ulong r1, + uint64_t r2, target_ulong r3) +{ + uint64_t ret, mul; + uint64_t t1 = extract64(r1, 0, 32); + uint64_t t3 = extract64(r3, 0, 32); + + mul = t1 * t3; + ret = r2 - mul; + + t1 = ret >> 32; + env->PSW_USB_AV = t1 ^ t1 * 2u; + env->PSW_USB_SAV |= env->PSW_USB_AV; + + if (ret > r2) { + env->PSW_USB_V = (1 << 31); + env->PSW_USB_SV = (1 << 31); + /* saturate */ + ret = 0; + } else { + 
env->PSW_USB_V = 0; + } + return ret; +} + +uint32_t +helper_msub32_q_sub_ssov(CPUTriCoreState *env, uint64_t r1, uint64_t r2) +{ + int64_t result; + int64_t t1 = (int64_t)r1; + int64_t t2 = (int64_t)r2; + + result = t1 - t2; + + env->PSW_USB_AV = (result ^ result * 2u); + env->PSW_USB_SAV |= env->PSW_USB_AV; + + /* we do the saturation by hand, since we produce an overflow on the host + if the mul before was (0x80000000 * 0x80000000) << 1). If this is the + case, we flip the saturated value. */ + if (r2 == 0x8000000000000000LL) { + if (result > 0x7fffffffLL) { + env->PSW_USB_V = (1 << 31); + env->PSW_USB_SV = (1 << 31); + result = INT32_MIN; + } else if (result < -0x80000000LL) { + env->PSW_USB_V = (1 << 31); + env->PSW_USB_SV = (1 << 31); + result = INT32_MAX; + } else { + env->PSW_USB_V = 0; + } + } else { + if (result > 0x7fffffffLL) { + env->PSW_USB_V = (1 << 31); + env->PSW_USB_SV = (1 << 31); + result = INT32_MAX; + } else if (result < -0x80000000LL) { + env->PSW_USB_V = (1 << 31); + env->PSW_USB_SV = (1 << 31); + result = INT32_MIN; + } else { + env->PSW_USB_V = 0; + } + } + return (uint32_t)result; +} + +uint64_t helper_msub64_q_ssov(CPUTriCoreState *env, uint64_t r1, uint32_t r2, + uint32_t r3, uint32_t n) +{ + int64_t t1 = (int64_t)r1; + int64_t t2 = sextract64(r2, 0, 32); + int64_t t3 = sextract64(r3, 0, 32); + int64_t result, mul; + int64_t ovf; + + mul = (t2 * t3) << n; + result = t1 - mul; + + env->PSW_USB_AV = (result ^ result * 2u) >> 32; + env->PSW_USB_SAV |= env->PSW_USB_AV; + + ovf = (result ^ t1) & (t1 ^ mul); + /* we do the saturation by hand, since we produce an overflow on the host + if the mul before was (0x80000000 * 0x80000000) << 1). If this is the + case, we flip the saturated value. 
*/ + if (mul == 0x8000000000000000LL) { + if (ovf >= 0) { + env->PSW_USB_V = (1 << 31); + env->PSW_USB_SV = (1 << 31); + /* ext_ret > MAX_INT */ + if (mul >= 0) { + result = INT64_MAX; + /* ext_ret < MIN_INT */ + } else { + result = INT64_MIN; + } + } else { + env->PSW_USB_V = 0; + } + } else { + if (ovf < 0) { + env->PSW_USB_V = (1 << 31); + env->PSW_USB_SV = (1 << 31); + /* ext_ret > MAX_INT */ + if (mul < 0) { + result = INT64_MAX; + /* ext_ret < MIN_INT */ + } else { + result = INT64_MIN; + } + } else { + env->PSW_USB_V = 0; + } + } + + return (uint64_t)result; +} + +uint32_t helper_msubr_q_ssov(CPUTriCoreState *env, uint32_t r1, uint32_t r2, + uint32_t r3, uint32_t n) +{ + int64_t t1 = sextract64(r1, 0, 32); + int64_t t2 = sextract64(r2, 0, 32); + int64_t t3 = sextract64(r3, 0, 32); + int64_t mul, ret; + + if ((t2 == -0x8000ll) && (t3 == -0x8000ll) && (n == 1)) { + mul = 0x7fffffff; + } else { + mul = (t2 * t3) << n; + } + + ret = t1 - mul + 0x8000; + + env->PSW_USB_AV = ret ^ ret * 2u; + env->PSW_USB_SAV |= env->PSW_USB_AV; + + if (ret > 0x7fffffffll) { + env->PSW_USB_V = (1 << 31); + env->PSW_USB_SV |= env->PSW_USB_V; + ret = INT32_MAX; + } else if (ret < -0x80000000ll) { + env->PSW_USB_V = (1 << 31); + env->PSW_USB_SV |= env->PSW_USB_V; + ret = INT32_MIN; + } else { + env->PSW_USB_V = 0; + } + return ret & 0xffff0000ll; +} + +uint32_t helper_abs_b(CPUTriCoreState *env, target_ulong arg) +{ + int32_t b, i; + int32_t ovf = 0; + int32_t avf = 0; + int32_t ret = 0; + + for (i = 0; i < 4; i++) { + b = sextract32(arg, i * 8, 8); + b = (b >= 0) ? 
b : (0 - b); + ovf |= (b > 0x7F) || (b < -0x80); + avf |= b ^ b * 2u; + ret |= (b & 0xff) << (i * 8); + } + + env->PSW_USB_V = ovf << 31; + env->PSW_USB_SV |= env->PSW_USB_V; + env->PSW_USB_AV = avf << 24; + env->PSW_USB_SAV |= env->PSW_USB_AV; + + return ret; +} + +uint32_t helper_abs_h(CPUTriCoreState *env, target_ulong arg) +{ + int32_t h, i; + int32_t ovf = 0; + int32_t avf = 0; + int32_t ret = 0; + + for (i = 0; i < 2; i++) { + h = sextract32(arg, i * 16, 16); + h = (h >= 0) ? h : (0 - h); + ovf |= (h > 0x7FFF) || (h < -0x8000); + avf |= h ^ h * 2u; + ret |= (h & 0xffff) << (i * 16); + } + + env->PSW_USB_V = ovf << 31; + env->PSW_USB_SV |= env->PSW_USB_V; + env->PSW_USB_AV = avf << 16; + env->PSW_USB_SAV |= env->PSW_USB_AV; + + return ret; +} + +uint32_t helper_absdif_b(CPUTriCoreState *env, target_ulong r1, target_ulong r2) +{ + int32_t b, i; + int32_t extr_r2; + int32_t ovf = 0; + int32_t avf = 0; + int32_t ret = 0; + + for (i = 0; i < 4; i++) { + extr_r2 = sextract32(r2, i * 8, 8); + b = sextract32(r1, i * 8, 8); + b = (b > extr_r2) ? (b - extr_r2) : (extr_r2 - b); + ovf |= (b > 0x7F) || (b < -0x80); + avf |= b ^ b * 2u; + ret |= (b & 0xff) << (i * 8); + } + + env->PSW_USB_V = ovf << 31; + env->PSW_USB_SV |= env->PSW_USB_V; + env->PSW_USB_AV = avf << 24; + env->PSW_USB_SAV |= env->PSW_USB_AV; + return ret; +} + +uint32_t helper_absdif_h(CPUTriCoreState *env, target_ulong r1, target_ulong r2) +{ + int32_t h, i; + int32_t extr_r2; + int32_t ovf = 0; + int32_t avf = 0; + int32_t ret = 0; + + for (i = 0; i < 2; i++) { + extr_r2 = sextract32(r2, i * 16, 16); + h = sextract32(r1, i * 16, 16); + h = (h > extr_r2) ? 
(h - extr_r2) : (extr_r2 - h); + ovf |= (h > 0x7FFF) || (h < -0x8000); + avf |= h ^ h * 2u; + ret |= (h & 0xffff) << (i * 16); + } + + env->PSW_USB_V = ovf << 31; + env->PSW_USB_SV |= env->PSW_USB_V; + env->PSW_USB_AV = avf << 16; + env->PSW_USB_SAV |= env->PSW_USB_AV; + + return ret; +} + +uint32_t helper_addr_h(CPUTriCoreState *env, uint64_t r1, uint32_t r2_l, + uint32_t r2_h) +{ + int64_t mul_res0 = sextract64(r1, 0, 32); + int64_t mul_res1 = sextract64(r1, 32, 32); + int64_t r2_low = sextract64(r2_l, 0, 32); + int64_t r2_high = sextract64(r2_h, 0, 32); + int64_t result0, result1; + uint32_t ovf0, ovf1; + uint32_t avf0, avf1; + + ovf0 = ovf1 = 0; + + result0 = r2_low + mul_res0 + 0x8000; + result1 = r2_high + mul_res1 + 0x8000; + + if ((result0 > INT32_MAX) || (result0 < INT32_MIN)) { + ovf0 = (1 << 31); + } + + if ((result1 > INT32_MAX) || (result1 < INT32_MIN)) { + ovf1 = (1 << 31); + } + + env->PSW_USB_V = ovf0 | ovf1; + env->PSW_USB_SV |= env->PSW_USB_V; + + avf0 = result0 * 2u; + avf0 = result0 ^ avf0; + avf1 = result1 * 2u; + avf1 = result1 ^ avf1; + + env->PSW_USB_AV = avf0 | avf1; + env->PSW_USB_SAV |= env->PSW_USB_AV; + + return (result1 & 0xffff0000ULL) | ((result0 >> 16) & 0xffffULL); +} + +uint32_t helper_addsur_h(CPUTriCoreState *env, uint64_t r1, uint32_t r2_l, + uint32_t r2_h) +{ + int64_t mul_res0 = sextract64(r1, 0, 32); + int64_t mul_res1 = sextract64(r1, 32, 32); + int64_t r2_low = sextract64(r2_l, 0, 32); + int64_t r2_high = sextract64(r2_h, 0, 32); + int64_t result0, result1; + uint32_t ovf0, ovf1; + uint32_t avf0, avf1; + + ovf0 = ovf1 = 0; + + result0 = r2_low - mul_res0 + 0x8000; + result1 = r2_high + mul_res1 + 0x8000; + + if ((result0 > INT32_MAX) || (result0 < INT32_MIN)) { + ovf0 = (1 << 31); + } + + if ((result1 > INT32_MAX) || (result1 < INT32_MIN)) { + ovf1 = (1 << 31); + } + + env->PSW_USB_V = ovf0 | ovf1; + env->PSW_USB_SV |= env->PSW_USB_V; + + avf0 = result0 * 2u; + avf0 = result0 ^ avf0; + avf1 = result1 * 2u; + avf1 = result1 
^ avf1; + + env->PSW_USB_AV = avf0 | avf1; + env->PSW_USB_SAV |= env->PSW_USB_AV; + + return (result1 & 0xffff0000ULL) | ((result0 >> 16) & 0xffffULL); +} + +uint32_t helper_maddr_q(CPUTriCoreState *env, uint32_t r1, uint32_t r2, + uint32_t r3, uint32_t n) +{ + int64_t t1 = sextract64(r1, 0, 32); + int64_t t2 = sextract64(r2, 0, 32); + int64_t t3 = sextract64(r3, 0, 32); + int64_t mul, ret; + + if ((t2 == -0x8000ll) && (t3 == -0x8000ll) && (n == 1)) { + mul = 0x7fffffff; + } else { + mul = (t2 * t3) << n; + } + + ret = t1 + mul + 0x8000; + + if ((ret > 0x7fffffffll) || (ret < -0x80000000ll)) { + env->PSW_USB_V = (1 << 31); + env->PSW_USB_SV |= env->PSW_USB_V; + } else { + env->PSW_USB_V = 0; + } + env->PSW_USB_AV = ret ^ ret * 2u; + env->PSW_USB_SAV |= env->PSW_USB_AV; + + return ret & 0xffff0000ll; +} + +uint32_t helper_add_b(CPUTriCoreState *env, target_ulong r1, target_ulong r2) +{ + int32_t b, i; + int32_t extr_r1, extr_r2; + int32_t ovf = 0; + int32_t avf = 0; + uint32_t ret = 0; + + for (i = 0; i < 4; i++) { + extr_r1 = sextract32(r1, i * 8, 8); + extr_r2 = sextract32(r2, i * 8, 8); + + b = extr_r1 + extr_r2; + ovf |= ((b > 0x7f) || (b < -0x80)); + avf |= b ^ b * 2u; + ret |= ((b & 0xff) << (i*8)); + } + + env->PSW_USB_V = (ovf << 31); + env->PSW_USB_SV |= env->PSW_USB_V; + env->PSW_USB_AV = avf << 24; + env->PSW_USB_SAV |= env->PSW_USB_AV; + + return ret; +} + +uint32_t helper_add_h(CPUTriCoreState *env, target_ulong r1, target_ulong r2) +{ + int32_t h, i; + int32_t extr_r1, extr_r2; + int32_t ovf = 0; + int32_t avf = 0; + int32_t ret = 0; + + for (i = 0; i < 2; i++) { + extr_r1 = sextract32(r1, i * 16, 16); + extr_r2 = sextract32(r2, i * 16, 16); + h = extr_r1 + extr_r2; + ovf |= ((h > 0x7fff) || (h < -0x8000)); + avf |= h ^ h * 2u; + ret |= (h & 0xffff) << (i * 16); + } + + env->PSW_USB_V = (ovf << 31); + env->PSW_USB_SV |= env->PSW_USB_V; + env->PSW_USB_AV = (avf << 16); + env->PSW_USB_SAV |= env->PSW_USB_AV; + + return ret; +} + +uint32_t 
helper_subr_h(CPUTriCoreState *env, uint64_t r1, uint32_t r2_l, + uint32_t r2_h) +{ + int64_t mul_res0 = sextract64(r1, 0, 32); + int64_t mul_res1 = sextract64(r1, 32, 32); + int64_t r2_low = sextract64(r2_l, 0, 32); + int64_t r2_high = sextract64(r2_h, 0, 32); + int64_t result0, result1; + uint32_t ovf0, ovf1; + uint32_t avf0, avf1; + + ovf0 = ovf1 = 0; + + result0 = r2_low - mul_res0 + 0x8000; + result1 = r2_high - mul_res1 + 0x8000; + + if ((result0 > INT32_MAX) || (result0 < INT32_MIN)) { + ovf0 = (1 << 31); + } + + if ((result1 > INT32_MAX) || (result1 < INT32_MIN)) { + ovf1 = (1 << 31); + } + + env->PSW_USB_V = ovf0 | ovf1; + env->PSW_USB_SV |= env->PSW_USB_V; + + avf0 = result0 * 2u; + avf0 = result0 ^ avf0; + avf1 = result1 * 2u; + avf1 = result1 ^ avf1; + + env->PSW_USB_AV = avf0 | avf1; + env->PSW_USB_SAV |= env->PSW_USB_AV; + + return (result1 & 0xffff0000ULL) | ((result0 >> 16) & 0xffffULL); +} + +uint32_t helper_subadr_h(CPUTriCoreState *env, uint64_t r1, uint32_t r2_l, + uint32_t r2_h) +{ + int64_t mul_res0 = sextract64(r1, 0, 32); + int64_t mul_res1 = sextract64(r1, 32, 32); + int64_t r2_low = sextract64(r2_l, 0, 32); + int64_t r2_high = sextract64(r2_h, 0, 32); + int64_t result0, result1; + uint32_t ovf0, ovf1; + uint32_t avf0, avf1; + + ovf0 = ovf1 = 0; + + result0 = r2_low + mul_res0 + 0x8000; + result1 = r2_high - mul_res1 + 0x8000; + + if ((result0 > INT32_MAX) || (result0 < INT32_MIN)) { + ovf0 = (1 << 31); + } + + if ((result1 > INT32_MAX) || (result1 < INT32_MIN)) { + ovf1 = (1 << 31); + } + + env->PSW_USB_V = ovf0 | ovf1; + env->PSW_USB_SV |= env->PSW_USB_V; + + avf0 = result0 * 2u; + avf0 = result0 ^ avf0; + avf1 = result1 * 2u; + avf1 = result1 ^ avf1; + + env->PSW_USB_AV = avf0 | avf1; + env->PSW_USB_SAV |= env->PSW_USB_AV; + + return (result1 & 0xffff0000ULL) | ((result0 >> 16) & 0xffffULL); +} + +uint32_t helper_msubr_q(CPUTriCoreState *env, uint32_t r1, uint32_t r2, + uint32_t r3, uint32_t n) +{ + int64_t t1 = sextract64(r1, 0, 32); + 
int64_t t2 = sextract64(r2, 0, 32); + int64_t t3 = sextract64(r3, 0, 32); + int64_t mul, ret; + + if ((t2 == -0x8000ll) && (t3 == -0x8000ll) && (n == 1)) { + mul = 0x7fffffff; + } else { + mul = (t2 * t3) << n; + } + + ret = t1 - mul + 0x8000; + + if ((ret > 0x7fffffffll) || (ret < -0x80000000ll)) { + env->PSW_USB_V = (1 << 31); + env->PSW_USB_SV |= env->PSW_USB_V; + } else { + env->PSW_USB_V = 0; + } + env->PSW_USB_AV = ret ^ ret * 2u; + env->PSW_USB_SAV |= env->PSW_USB_AV; + + return ret & 0xffff0000ll; +} + +uint32_t helper_sub_b(CPUTriCoreState *env, target_ulong r1, target_ulong r2) +{ + int32_t b, i; + int32_t extr_r1, extr_r2; + int32_t ovf = 0; + int32_t avf = 0; + uint32_t ret = 0; + + for (i = 0; i < 4; i++) { + extr_r1 = sextract32(r1, i * 8, 8); + extr_r2 = sextract32(r2, i * 8, 8); + + b = extr_r1 - extr_r2; + ovf |= ((b > 0x7f) || (b < -0x80)); + avf |= b ^ b * 2u; + ret |= ((b & 0xff) << (i*8)); + } + + env->PSW_USB_V = (ovf << 31); + env->PSW_USB_SV |= env->PSW_USB_V; + env->PSW_USB_AV = avf << 24; + env->PSW_USB_SAV |= env->PSW_USB_AV; + + return ret; +} + +uint32_t helper_sub_h(CPUTriCoreState *env, target_ulong r1, target_ulong r2) +{ + int32_t h, i; + int32_t extr_r1, extr_r2; + int32_t ovf = 0; + int32_t avf = 0; + int32_t ret = 0; + + for (i = 0; i < 2; i++) { + extr_r1 = sextract32(r1, i * 16, 16); + extr_r2 = sextract32(r2, i * 16, 16); + h = extr_r1 - extr_r2; + ovf |= ((h > 0x7fff) || (h < -0x8000)); + avf |= h ^ h * 2u; + ret |= (h & 0xffff) << (i * 16); + } + + env->PSW_USB_V = (ovf << 31); + env->PSW_USB_SV |= env->PSW_USB_V; + env->PSW_USB_AV = avf << 16; + env->PSW_USB_SAV |= env->PSW_USB_AV; + + return ret; +} + +uint32_t helper_eq_b(target_ulong r1, target_ulong r2) +{ + int32_t ret; + int32_t i, msk; + + ret = 0; + msk = 0xff; + for (i = 0; i < 4; i++) { + if ((r1 & msk) == (r2 & msk)) { + ret |= msk; + } + msk = msk << 8; + } + + return ret; +} + +uint32_t helper_eq_h(target_ulong r1, target_ulong r2) +{ + int32_t ret = 0; + + if 
((r1 & 0xffff) == (r2 & 0xffff)) { + ret = 0xffff; + } + + if ((r1 & 0xffff0000) == (r2 & 0xffff0000)) { + ret |= 0xffff0000; + } + + return ret; +} + +uint32_t helper_eqany_b(target_ulong r1, target_ulong r2) +{ + int32_t i; + uint32_t ret = 0; + + for (i = 0; i < 4; i++) { + ret |= (sextract32(r1, i * 8, 8) == sextract32(r2, i * 8, 8)); + } + + return ret; +} + +uint32_t helper_eqany_h(target_ulong r1, target_ulong r2) +{ + uint32_t ret; + + ret = (sextract32(r1, 0, 16) == sextract32(r2, 0, 16)); + ret |= (sextract32(r1, 16, 16) == sextract32(r2, 16, 16)); + + return ret; +} + +uint32_t helper_lt_b(target_ulong r1, target_ulong r2) +{ + int32_t i; + uint32_t ret = 0; + + for (i = 0; i < 4; i++) { + if (sextract32(r1, i * 8, 8) < sextract32(r2, i * 8, 8)) { + ret |= (0xff << (i * 8)); + } + } + + return ret; +} + +uint32_t helper_lt_bu(target_ulong r1, target_ulong r2) +{ + int32_t i; + uint32_t ret = 0; + + for (i = 0; i < 4; i++) { + if (extract32(r1, i * 8, 8) < extract32(r2, i * 8, 8)) { + ret |= (0xff << (i * 8)); + } + } + + return ret; +} + +uint32_t helper_lt_h(target_ulong r1, target_ulong r2) +{ + uint32_t ret = 0; + + if (sextract32(r1, 0, 16) < sextract32(r2, 0, 16)) { + ret |= 0xffff; + } + + if (sextract32(r1, 16, 16) < sextract32(r2, 16, 16)) { + ret |= 0xffff0000; + } + + return ret; +} + +uint32_t helper_lt_hu(target_ulong r1, target_ulong r2) +{ + uint32_t ret = 0; + + if (extract32(r1, 0, 16) < extract32(r2, 0, 16)) { + ret |= 0xffff; + } + + if (extract32(r1, 16, 16) < extract32(r2, 16, 16)) { + ret |= 0xffff0000; + } + + return ret; +} + +#define EXTREMA_H_B(name, op) \ +uint32_t helper_##name ##_b(target_ulong r1, target_ulong r2) \ +{ \ + int32_t i, extr_r1, extr_r2; \ + uint32_t ret = 0; \ + \ + for (i = 0; i < 4; i++) { \ + extr_r1 = sextract32(r1, i * 8, 8); \ + extr_r2 = sextract32(r2, i * 8, 8); \ + extr_r1 = (extr_r1 op extr_r2) ? 
extr_r1 : extr_r2; \ + ret |= (extr_r1 & 0xff) << (i * 8); \ + } \ + return ret; \ +} \ + \ +uint32_t helper_##name ##_bu(target_ulong r1, target_ulong r2)\ +{ \ + int32_t i; \ + uint32_t extr_r1, extr_r2; \ + uint32_t ret = 0; \ + \ + for (i = 0; i < 4; i++) { \ + extr_r1 = extract32(r1, i * 8, 8); \ + extr_r2 = extract32(r2, i * 8, 8); \ + extr_r1 = (extr_r1 op extr_r2) ? extr_r1 : extr_r2; \ + ret |= (extr_r1 & 0xff) << (i * 8); \ + } \ + return ret; \ +} \ + \ +uint32_t helper_##name ##_h(target_ulong r1, target_ulong r2) \ +{ \ + int32_t extr_r1, extr_r2; \ + uint32_t ret = 0; \ + \ + extr_r1 = sextract32(r1, 0, 16); \ + extr_r2 = sextract32(r2, 0, 16); \ + ret = (extr_r1 op extr_r2) ? extr_r1 : extr_r2; \ + ret = ret & 0xffff; \ + \ + extr_r1 = sextract32(r1, 16, 16); \ + extr_r2 = sextract32(r2, 16, 16); \ + extr_r1 = (extr_r1 op extr_r2) ? extr_r1 : extr_r2; \ + ret |= extr_r1 << 16; \ + \ + return ret; \ +} \ + \ +uint32_t helper_##name ##_hu(target_ulong r1, target_ulong r2)\ +{ \ + uint32_t extr_r1, extr_r2; \ + uint32_t ret = 0; \ + \ + extr_r1 = extract32(r1, 0, 16); \ + extr_r2 = extract32(r2, 0, 16); \ + ret = (extr_r1 op extr_r2) ? extr_r1 : extr_r2; \ + ret = ret & 0xffff; \ + \ + extr_r1 = extract32(r1, 16, 16); \ + extr_r2 = extract32(r2, 16, 16); \ + extr_r1 = (extr_r1 op extr_r2) ? 
extr_r1 : extr_r2; \ + ret |= extr_r1 << (16); \ + \ + return ret; \ +} \ + \ +uint64_t helper_ix##name(uint64_t r1, uint32_t r2) \ +{ \ + int64_t r2l, r2h, r1hl; \ + uint64_t ret = 0; \ + \ + ret = ((r1 + 2) & 0xffff); \ + r2l = sextract64(r2, 0, 16); \ + r2h = sextract64(r2, 16, 16); \ + r1hl = sextract64(r1, 32, 16); \ + \ + if ((r2l op ## = r2h) && (r2l op r1hl)) { \ + ret |= (r2l & 0xffff) << 32; \ + ret |= extract64(r1, 0, 16) << 16; \ + } else if ((r2h op r2l) && (r2h op r1hl)) { \ + ret |= extract64(r2, 16, 16) << 32; \ + ret |= extract64(r1 + 1, 0, 16) << 16; \ + } else { \ + ret |= r1 & 0xffffffff0000ull; \ + } \ + return ret; \ +} \ + \ +uint64_t helper_ix##name ##_u(uint64_t r1, uint32_t r2) \ +{ \ + int64_t r2l, r2h, r1hl; \ + uint64_t ret = 0; \ + \ + ret = ((r1 + 2) & 0xffff); \ + r2l = extract64(r2, 0, 16); \ + r2h = extract64(r2, 16, 16); \ + r1hl = extract64(r1, 32, 16); \ + \ + if ((r2l op ## = r2h) && (r2l op r1hl)) { \ + ret |= (r2l & 0xffff) << 32; \ + ret |= extract64(r1, 0, 16) << 16; \ + } else if ((r2h op r2l) && (r2h op r1hl)) { \ + ret |= extract64(r2, 16, 16) << 32; \ + ret |= extract64(r1 + 1, 0, 16) << 16; \ + } else { \ + ret |= r1 & 0xffffffff0000ull; \ + } \ + return ret; \ +} + +EXTREMA_H_B(max, >) +EXTREMA_H_B(min, <) + +#undef EXTREMA_H_B + +uint32_t helper_clo_h(target_ulong r1) +{ + uint32_t ret_hw0 = extract32(r1, 0, 16); + uint32_t ret_hw1 = extract32(r1, 16, 16); + + ret_hw0 = clo32(ret_hw0 << 16); + ret_hw1 = clo32(ret_hw1 << 16); + + if (ret_hw0 > 16) { + ret_hw0 = 16; + } + if (ret_hw1 > 16) { + ret_hw1 = 16; + } + + return ret_hw0 | (ret_hw1 << 16); +} + +uint32_t helper_clz_h(target_ulong r1) +{ + uint32_t ret_hw0 = extract32(r1, 0, 16); + uint32_t ret_hw1 = extract32(r1, 16, 16); + + ret_hw0 = clz32(ret_hw0 << 16); + ret_hw1 = clz32(ret_hw1 << 16); + + if (ret_hw0 > 16) { + ret_hw0 = 16; + } + if (ret_hw1 > 16) { + ret_hw1 = 16; + } + + return ret_hw0 | (ret_hw1 << 16); +} + +uint32_t helper_cls_h(target_ulong r1) +{ 
+ uint32_t ret_hw0 = extract32(r1, 0, 16); + uint32_t ret_hw1 = extract32(r1, 16, 16); + + ret_hw0 = clrsb32(ret_hw0 << 16); + ret_hw1 = clrsb32(ret_hw1 << 16); + + if (ret_hw0 > 15) { + ret_hw0 = 15; + } + if (ret_hw1 > 15) { + ret_hw1 = 15; + } + + return ret_hw0 | (ret_hw1 << 16); +} + +uint32_t helper_sh(target_ulong r1, target_ulong r2) +{ + int32_t shift_count = sextract32(r2, 0, 6); + + if (shift_count == -32) { + return 0; + } else if (shift_count < 0) { + return r1 >> -shift_count; + } else { + return r1 << shift_count; + } +} + +uint32_t helper_sh_h(target_ulong r1, target_ulong r2) +{ + int32_t ret_hw0, ret_hw1; + int32_t shift_count; + + shift_count = sextract32(r2, 0, 5); + + if (shift_count == -16) { + return 0; + } else if (shift_count < 0) { + ret_hw0 = extract32(r1, 0, 16) >> -shift_count; + ret_hw1 = extract32(r1, 16, 16) >> -shift_count; + return (ret_hw0 & 0xffff) | (ret_hw1 << 16); + } else { + ret_hw0 = extract32(r1, 0, 16) << shift_count; + ret_hw1 = extract32(r1, 16, 16) << shift_count; + return (ret_hw0 & 0xffff) | (ret_hw1 << 16); + } +} + +uint32_t helper_sha(CPUTriCoreState *env, target_ulong r1, target_ulong r2) +{ + int32_t shift_count; + int64_t result, t1; + uint32_t ret; + + shift_count = sextract32(r2, 0, 6); + t1 = sextract32(r1, 0, 32); + + if (shift_count == 0) { + env->PSW_USB_C = env->PSW_USB_V = 0; + ret = r1; + } else if (shift_count == -32) { + env->PSW_USB_C = r1; + env->PSW_USB_V = 0; + ret = t1 >> 31; + } else if (shift_count > 0) { + result = t1 << shift_count; + /* calc carry */ + env->PSW_USB_C = ((result & 0xffffffff00000000ULL) != 0); + /* calc v */ + env->PSW_USB_V = (((result > 0x7fffffffLL) || + (result < -0x80000000LL)) << 31); + /* calc sv */ + env->PSW_USB_SV |= env->PSW_USB_V; + ret = (uint32_t)result; + } else { + env->PSW_USB_V = 0; + env->PSW_USB_C = (r1 & ((1 << -shift_count) - 1)); + ret = t1 >> -shift_count; + } + + env->PSW_USB_AV = ret ^ ret * 2u; + env->PSW_USB_SAV |= env->PSW_USB_AV; + + return ret; 
+} + +uint32_t helper_sha_h(target_ulong r1, target_ulong r2) +{ + int32_t shift_count; + int32_t ret_hw0, ret_hw1; + + shift_count = sextract32(r2, 0, 5); + + if (shift_count == 0) { + return r1; + } else if (shift_count < 0) { + ret_hw0 = sextract32(r1, 0, 16) >> -shift_count; + ret_hw1 = sextract32(r1, 16, 16) >> -shift_count; + return (ret_hw0 & 0xffff) | (ret_hw1 << 16); + } else { + ret_hw0 = sextract32(r1, 0, 16) << shift_count; + ret_hw1 = sextract32(r1, 16, 16) << shift_count; + return (ret_hw0 & 0xffff) | (ret_hw1 << 16); + } +} + +uint32_t helper_bmerge(target_ulong r1, target_ulong r2) +{ + uint32_t i, ret; + + ret = 0; + for (i = 0; i < 16; i++) { + ret |= (r1 & 1) << (2 * i + 1); + ret |= (r2 & 1) << (2 * i); + r1 = r1 >> 1; + r2 = r2 >> 1; + } + return ret; +} + +uint64_t helper_bsplit(uint32_t r1) +{ + int32_t i; + uint64_t ret; + + ret = 0; + for (i = 0; i < 32; i = i + 2) { + /* even */ + ret |= (r1 & 1) << (i/2); + r1 = r1 >> 1; + /* odd */ + ret |= (uint64_t)(r1 & 1) << (i/2 + 32); + r1 = r1 >> 1; + } + return ret; +} + +uint32_t helper_parity(target_ulong r1) +{ + uint32_t ret; + uint32_t nOnes, i; + + ret = 0; + nOnes = 0; + for (i = 0; i < 8; i++) { + ret ^= (r1 & 1); + r1 = r1 >> 1; + } + /* second byte */ + nOnes = 0; + for (i = 0; i < 8; i++) { + nOnes ^= (r1 & 1); + r1 = r1 >> 1; + } + ret |= nOnes << 8; + /* third byte */ + nOnes = 0; + for (i = 0; i < 8; i++) { + nOnes ^= (r1 & 1); + r1 = r1 >> 1; + } + ret |= nOnes << 16; + /* fourth byte */ + nOnes = 0; + for (i = 0; i < 8; i++) { + nOnes ^= (r1 & 1); + r1 = r1 >> 1; + } + ret |= nOnes << 24; + + return ret; +} + +uint32_t helper_pack(uint32_t carry, uint32_t r1_low, uint32_t r1_high, + target_ulong r2) +{ + uint32_t ret; + int32_t fp_exp, fp_frac, temp_exp, fp_exp_frac; + int32_t int_exp = r1_high; + int32_t int_mant = r1_low; + uint32_t flag_rnd = (int_mant & (1 << 7)) && ( + (int_mant & (1 << 8)) || + (int_mant & 0x7f) || + (carry != 0)); + if (((int_mant & (1<<31)) == 0) && 
(int_exp == 255)) { + fp_exp = 255; + fp_frac = extract32(int_mant, 8, 23); + } else if ((int_mant & (1<<31)) && (int_exp >= 127)) { + fp_exp = 255; + fp_frac = 0; + } else if ((int_mant & (1<<31)) && (int_exp <= -128)) { + fp_exp = 0; + fp_frac = 0; + } else if (int_mant == 0) { + fp_exp = 0; + fp_frac = 0; + } else { + if (((int_mant & (1 << 31)) == 0)) { + temp_exp = 0; + } else { + temp_exp = int_exp + 128; + } + fp_exp_frac = (((temp_exp & 0xff) << 23) | + extract32(int_mant, 8, 23)) + + flag_rnd; + fp_exp = extract32(fp_exp_frac, 23, 8); + fp_frac = extract32(fp_exp_frac, 0, 23); + } + ret = r2 & (1 << 31); + ret = ret + (fp_exp << 23); + ret = ret + (fp_frac & 0x7fffff); + + return ret; +} + +uint64_t helper_unpack(target_ulong arg1) +{ + int32_t fp_exp = extract32(arg1, 23, 8); + int32_t fp_frac = extract32(arg1, 0, 23); + uint64_t ret; + int32_t int_exp, int_mant; + + if (fp_exp == 255) { + int_exp = 255; + int_mant = (fp_frac << 7); + } else if ((fp_exp == 0) && (fp_frac == 0)) { + int_exp = -127; + int_mant = 0; + } else if ((fp_exp == 0) && (fp_frac != 0)) { + int_exp = -126; + int_mant = (fp_frac << 7); + } else { + int_exp = fp_exp - 127; + int_mant = (fp_frac << 7); + int_mant |= (1 << 30); + } + ret = int_exp; + ret = ret << 32; + ret |= int_mant; + + return ret; +} + +uint64_t helper_dvinit_b_13(CPUTriCoreState *env, uint32_t r1, uint32_t r2) +{ + uint64_t ret; + int32_t abs_sig_dividend, abs_divisor; + + ret = sextract32(r1, 0, 32); + ret = ret << 24; + if (!((r1 & 0x80000000) == (r2 & 0x80000000))) { + ret |= 0xffffff; + } + + abs_sig_dividend = abs((int32_t)r1) >> 8; + abs_divisor = abs((int32_t)r2); + /* calc overflow + ofv if (a/b >= 255) <=> (a/255 >= b) */ + env->PSW_USB_V = (abs_sig_dividend >= abs_divisor) << 31; + env->PSW_USB_V = env->PSW_USB_V << 31; + env->PSW_USB_SV |= env->PSW_USB_V; + env->PSW_USB_AV = 0; + + return ret; +} + +uint64_t helper_dvinit_b_131(CPUTriCoreState *env, uint32_t r1, uint32_t r2) +{ + uint64_t ret = 
sextract32(r1, 0, 32); + + ret = ret << 24; + if (!((r1 & 0x80000000) == (r2 & 0x80000000))) { + ret |= 0xffffff; + } + /* calc overflow */ + env->PSW_USB_V = ((r2 == 0) || ((r2 == 0xffffffff) && (r1 == 0xffffff80))); + env->PSW_USB_V = env->PSW_USB_V << 31; + env->PSW_USB_SV |= env->PSW_USB_V; + env->PSW_USB_AV = 0; + + return ret; +} + +uint64_t helper_dvinit_h_13(CPUTriCoreState *env, uint32_t r1, uint32_t r2) +{ + uint64_t ret; + int32_t abs_sig_dividend, abs_divisor; + + ret = sextract32(r1, 0, 32); + ret = ret << 16; + if (!((r1 & 0x80000000) == (r2 & 0x80000000))) { + ret |= 0xffff; + } + + abs_sig_dividend = abs((int32_t)r1) >> 16; + abs_divisor = abs((int32_t)r2); + /* calc overflow + ofv if (a/b >= 0xffff) <=> (a/0xffff >= b) */ + env->PSW_USB_V = (abs_sig_dividend >= abs_divisor) << 31; + env->PSW_USB_V = env->PSW_USB_V << 31; + env->PSW_USB_SV |= env->PSW_USB_V; + env->PSW_USB_AV = 0; + + return ret; +} + +uint64_t helper_dvinit_h_131(CPUTriCoreState *env, uint32_t r1, uint32_t r2) +{ + uint64_t ret = sextract32(r1, 0, 32); + + ret = ret << 16; + if (!((r1 & 0x80000000) == (r2 & 0x80000000))) { + ret |= 0xffff; + } + /* calc overflow */ + env->PSW_USB_V = ((r2 == 0) || ((r2 == 0xffffffff) && (r1 == 0xffff8000))); + env->PSW_USB_V = env->PSW_USB_V << 31; + env->PSW_USB_SV |= env->PSW_USB_V; + env->PSW_USB_AV = 0; + + return ret; +} + +uint64_t helper_dvadj(uint64_t r1, uint32_t r2) +{ + int32_t x_sign = (r1 >> 63); + int32_t q_sign = x_sign ^ (r2 >> 31); + int32_t eq_pos = x_sign & ((r1 >> 32) == r2); + int32_t eq_neg = x_sign & ((r1 >> 32) == -r2); + uint32_t quotient; + uint64_t remainder; + + if ((q_sign & ~eq_neg) | eq_pos) { + quotient = (r1 + 1) & 0xffffffff; + } else { + quotient = r1 & 0xffffffff; + } + + if (eq_pos | eq_neg) { + remainder = 0; + } else { + remainder = (r1 & 0xffffffff00000000ull); + } + return remainder | quotient; +} + +uint64_t helper_dvstep(uint64_t r1, uint32_t r2) +{ + int32_t dividend_sign = extract64(r1, 63, 1); + int32_t 
divisor_sign = extract32(r2, 31, 1); + int32_t quotient_sign = (dividend_sign != divisor_sign); + int32_t addend, dividend_quotient, remainder; + int32_t i, temp; + + if (quotient_sign) { + addend = r2; + } else { + addend = -r2; + } + dividend_quotient = (int32_t)r1; + remainder = (int32_t)(r1 >> 32); + + for (i = 0; i < 8; i++) { + remainder = (remainder << 1) | extract32(dividend_quotient, 31, 1); + dividend_quotient <<= 1; + temp = remainder + addend; + if ((temp < 0) == dividend_sign) { + remainder = temp; + } + if (((temp < 0) == dividend_sign)) { + dividend_quotient = dividend_quotient | !quotient_sign; + } else { + dividend_quotient = dividend_quotient | quotient_sign; + } + } + return ((uint64_t)remainder << 32) | (uint32_t)dividend_quotient; +} + +uint64_t helper_dvstep_u(uint64_t r1, uint32_t r2) +{ + int32_t dividend_quotient = extract64(r1, 0, 32); + int64_t remainder = extract64(r1, 32, 32); + int32_t i; + int64_t temp; + for (i = 0; i < 8; i++) { + remainder = (remainder << 1) | extract32(dividend_quotient, 31, 1); + dividend_quotient <<= 1; + temp = (remainder & 0xffffffff) - r2; + if (temp >= 0) { + remainder = temp; + } + dividend_quotient = dividend_quotient | !(temp < 0); + } + return ((uint64_t)remainder << 32) | (uint32_t)dividend_quotient; +} + +uint64_t helper_divide(CPUTriCoreState *env, uint32_t r1, uint32_t r2) +{ + int32_t quotient, remainder; + int32_t dividend = (int32_t)r1; + int32_t divisor = (int32_t)r2; + + if (divisor == 0) { + if (dividend >= 0) { + quotient = 0x7fffffff; + remainder = 0; + } else { + quotient = 0x80000000; + remainder = 0; + } + env->PSW_USB_V = (1 << 31); + } else if ((divisor == 0xffffffff) && (dividend == 0x80000000)) { + quotient = 0x7fffffff; + remainder = 0; + env->PSW_USB_V = (1 << 31); + } else { + remainder = dividend % divisor; + quotient = (dividend - remainder)/divisor; + env->PSW_USB_V = 0; + } + env->PSW_USB_SV |= env->PSW_USB_V; + env->PSW_USB_AV = 0; + return ((uint64_t)remainder << 32) | 
(uint32_t)quotient; +} + +uint64_t helper_divide_u(CPUTriCoreState *env, uint32_t r1, uint32_t r2) +{ + uint32_t quotient, remainder; + uint32_t dividend = r1; + uint32_t divisor = r2; + + if (divisor == 0) { + quotient = 0xffffffff; + remainder = 0; + env->PSW_USB_V = (1 << 31); + } else { + remainder = dividend % divisor; + quotient = (dividend - remainder)/divisor; + env->PSW_USB_V = 0; + } + env->PSW_USB_SV |= env->PSW_USB_V; + env->PSW_USB_AV = 0; + return ((uint64_t)remainder << 32) | quotient; +} + +uint64_t helper_mul_h(uint32_t arg00, uint32_t arg01, + uint32_t arg10, uint32_t arg11, uint32_t n) +{ + uint32_t result0, result1; + + int32_t sc1 = ((arg00 & 0xffff) == 0x8000) && + ((arg10 & 0xffff) == 0x8000) && (n == 1); + int32_t sc0 = ((arg01 & 0xffff) == 0x8000) && + ((arg11 & 0xffff) == 0x8000) && (n == 1); + if (sc1) { + result1 = 0x7fffffff; + } else { + result1 = (((uint32_t)(arg00 * arg10)) << n); + } + if (sc0) { + result0 = 0x7fffffff; + } else { + result0 = (((uint32_t)(arg01 * arg11)) << n); + } + return (((uint64_t)result1 << 32)) | result0; +} + +uint64_t helper_mulm_h(uint32_t arg00, uint32_t arg01, + uint32_t arg10, uint32_t arg11, uint32_t n) +{ + uint64_t ret; + int64_t result0, result1; + + int32_t sc1 = ((arg00 & 0xffff) == 0x8000) && + ((arg10 & 0xffff) == 0x8000) && (n == 1); + int32_t sc0 = ((arg01 & 0xffff) == 0x8000) && + ((arg11 & 0xffff) == 0x8000) && (n == 1); + + if (sc1) { + result1 = 0x7fffffff; + } else { + result1 = (((int32_t)arg00 * (int32_t)arg10) << n); + } + if (sc0) { + result0 = 0x7fffffff; + } else { + result0 = (((int32_t)arg01 * (int32_t)arg11) << n); + } + ret = (result1 + result0); + ret = ret << 16; + return ret; +} +uint32_t helper_mulr_h(uint32_t arg00, uint32_t arg01, + uint32_t arg10, uint32_t arg11, uint32_t n) +{ + uint32_t result0, result1; + + int32_t sc1 = ((arg00 & 0xffff) == 0x8000) && + ((arg10 & 0xffff) == 0x8000) && (n == 1); + int32_t sc0 = ((arg01 & 0xffff) == 0x8000) && + ((arg11 & 0xffff) == 
0x8000) && (n == 1); + + if (sc1) { + result1 = 0x7fffffff; + } else { + result1 = ((arg00 * arg10) << n) + 0x8000; + } + if (sc0) { + result0 = 0x7fffffff; + } else { + result0 = ((arg01 * arg11) << n) + 0x8000; + } + return (result1 & 0xffff0000) | (result0 >> 16); +} + +uint32_t helper_crc32(uint32_t arg0, uint32_t arg1) +{ + uint8_t buf[4]; + stl_be_p(buf, arg0); + + return crc32(arg1, buf, 4); +} + +/* context save area (CSA) related helpers */ + +static int cdc_increment(target_ulong *psw) +{ + if ((*psw & MASK_PSW_CDC) == 0x7f) { + return 0; + } + + (*psw)++; + /* check for overflow */ + int lo = clo32((*psw & MASK_PSW_CDC) << (32 - 7)); + int mask = (1u << (7 - lo)) - 1; + int count = *psw & mask; + if (count == 0) { + (*psw)--; + return 1; + } + return 0; +} + +static int cdc_decrement(target_ulong *psw) +{ + if ((*psw & MASK_PSW_CDC) == 0x7f) { + return 0; + } + /* check for underflow */ + int lo = clo32((*psw & MASK_PSW_CDC) << (32 - 7)); + int mask = (1u << (7 - lo)) - 1; + int count = *psw & mask; + if (count == 0) { + return 1; + } + (*psw)--; + return 0; +} + +static bool cdc_zero(target_ulong *psw) +{ + int cdc = *psw & MASK_PSW_CDC; + /* Returns TRUE if PSW.CDC.COUNT == 0 or if PSW.CDC == + 7'b1111111, otherwise returns FALSE. 
*/ + if (cdc == 0x7f) { + return true; + } + /* find CDC.COUNT */ + int lo = clo32((*psw & MASK_PSW_CDC) << (32 - 7)); + int mask = (1u << (7 - lo)) - 1; + int count = *psw & mask; + return count == 0; +} + +static void save_context_upper(CPUTriCoreState *env, int ea) +{ + cpu_stl_data(env, ea, env->PCXI); + cpu_stl_data(env, ea+4, psw_read(env)); + cpu_stl_data(env, ea+8, env->gpr_a[10]); + cpu_stl_data(env, ea+12, env->gpr_a[11]); + cpu_stl_data(env, ea+16, env->gpr_d[8]); + cpu_stl_data(env, ea+20, env->gpr_d[9]); + cpu_stl_data(env, ea+24, env->gpr_d[10]); + cpu_stl_data(env, ea+28, env->gpr_d[11]); + cpu_stl_data(env, ea+32, env->gpr_a[12]); + cpu_stl_data(env, ea+36, env->gpr_a[13]); + cpu_stl_data(env, ea+40, env->gpr_a[14]); + cpu_stl_data(env, ea+44, env->gpr_a[15]); + cpu_stl_data(env, ea+48, env->gpr_d[12]); + cpu_stl_data(env, ea+52, env->gpr_d[13]); + cpu_stl_data(env, ea+56, env->gpr_d[14]); + cpu_stl_data(env, ea+60, env->gpr_d[15]); +} + +static void save_context_lower(CPUTriCoreState *env, int ea) +{ + cpu_stl_data(env, ea, env->PCXI); + cpu_stl_data(env, ea+4, env->gpr_a[11]); + cpu_stl_data(env, ea+8, env->gpr_a[2]); + cpu_stl_data(env, ea+12, env->gpr_a[3]); + cpu_stl_data(env, ea+16, env->gpr_d[0]); + cpu_stl_data(env, ea+20, env->gpr_d[1]); + cpu_stl_data(env, ea+24, env->gpr_d[2]); + cpu_stl_data(env, ea+28, env->gpr_d[3]); + cpu_stl_data(env, ea+32, env->gpr_a[4]); + cpu_stl_data(env, ea+36, env->gpr_a[5]); + cpu_stl_data(env, ea+40, env->gpr_a[6]); + cpu_stl_data(env, ea+44, env->gpr_a[7]); + cpu_stl_data(env, ea+48, env->gpr_d[4]); + cpu_stl_data(env, ea+52, env->gpr_d[5]); + cpu_stl_data(env, ea+56, env->gpr_d[6]); + cpu_stl_data(env, ea+60, env->gpr_d[7]); +} + +static void restore_context_upper(CPUTriCoreState *env, int ea, + target_ulong *new_PCXI, target_ulong *new_PSW) +{ + *new_PCXI = cpu_ldl_data(env, ea); + *new_PSW = cpu_ldl_data(env, ea+4); + env->gpr_a[10] = cpu_ldl_data(env, ea+8); + env->gpr_a[11] = cpu_ldl_data(env, ea+12); 
+ env->gpr_d[8] = cpu_ldl_data(env, ea+16); + env->gpr_d[9] = cpu_ldl_data(env, ea+20); + env->gpr_d[10] = cpu_ldl_data(env, ea+24); + env->gpr_d[11] = cpu_ldl_data(env, ea+28); + env->gpr_a[12] = cpu_ldl_data(env, ea+32); + env->gpr_a[13] = cpu_ldl_data(env, ea+36); + env->gpr_a[14] = cpu_ldl_data(env, ea+40); + env->gpr_a[15] = cpu_ldl_data(env, ea+44); + env->gpr_d[12] = cpu_ldl_data(env, ea+48); + env->gpr_d[13] = cpu_ldl_data(env, ea+52); + env->gpr_d[14] = cpu_ldl_data(env, ea+56); + env->gpr_d[15] = cpu_ldl_data(env, ea+60); +} + +static void restore_context_lower(CPUTriCoreState *env, int ea, + target_ulong *ra, target_ulong *pcxi) +{ + *pcxi = cpu_ldl_data(env, ea); + *ra = cpu_ldl_data(env, ea+4); + env->gpr_a[2] = cpu_ldl_data(env, ea+8); + env->gpr_a[3] = cpu_ldl_data(env, ea+12); + env->gpr_d[0] = cpu_ldl_data(env, ea+16); + env->gpr_d[1] = cpu_ldl_data(env, ea+20); + env->gpr_d[2] = cpu_ldl_data(env, ea+24); + env->gpr_d[3] = cpu_ldl_data(env, ea+28); + env->gpr_a[4] = cpu_ldl_data(env, ea+32); + env->gpr_a[5] = cpu_ldl_data(env, ea+36); + env->gpr_a[6] = cpu_ldl_data(env, ea+40); + env->gpr_a[7] = cpu_ldl_data(env, ea+44); + env->gpr_d[4] = cpu_ldl_data(env, ea+48); + env->gpr_d[5] = cpu_ldl_data(env, ea+52); + env->gpr_d[6] = cpu_ldl_data(env, ea+56); + env->gpr_d[7] = cpu_ldl_data(env, ea+60); +} + +void helper_call(CPUTriCoreState *env, uint32_t next_pc) +{ + target_ulong tmp_FCX; + target_ulong ea; + target_ulong new_FCX; + target_ulong psw; + + psw = psw_read(env); + /* if (FCX == 0) trap(FCU); */ + if (env->FCX == 0) { + /* FCU trap */ + raise_exception_sync_helper(env, TRAPC_CTX_MNG, TIN3_FCU, GETPC()); + } + /* if (PSW.CDE) then if (cdc_increment()) then trap(CDO); */ + if (psw & MASK_PSW_CDE) { + if (cdc_increment(&psw)) { + /* CDO trap */ + raise_exception_sync_helper(env, TRAPC_CTX_MNG, TIN3_CDO, GETPC()); + } + } + /* PSW.CDE = 1;*/ + psw |= MASK_PSW_CDE; + /* tmp_FCX = FCX; */ + tmp_FCX = env->FCX; + /* EA = {FCX.FCXS, 6'b0, FCX.FCXO, 
6'b0}; */ + ea = ((env->FCX & MASK_FCX_FCXS) << 12) + + ((env->FCX & MASK_FCX_FCXO) << 6); + /* new_FCX = M(EA, word); */ + new_FCX = cpu_ldl_data(env, ea); + /* M(EA, 16 * word) = {PCXI, PSW, A[10], A[11], D[8], D[9], D[10], D[11], + A[12], A[13], A[14], A[15], D[12], D[13], D[14], + D[15]}; */ + save_context_upper(env, ea); + + /* PCXI.PCPN = ICR.CCPN; */ + env->PCXI = (env->PCXI & 0xffffff) + + ((env->ICR & MASK_ICR_CCPN) << 24); + /* PCXI.PIE = ICR.IE; */ + env->PCXI = ((env->PCXI & ~MASK_PCXI_PIE_1_3) + + ((env->ICR & MASK_ICR_IE_1_3) << 15)); + /* PCXI.UL = 1; */ + env->PCXI |= MASK_PCXI_UL; + + /* PCXI[19: 0] = FCX[19: 0]; */ + env->PCXI = (env->PCXI & 0xfff00000) + (env->FCX & 0xfffff); + /* FCX[19: 0] = new_FCX[19: 0]; */ + env->FCX = (env->FCX & 0xfff00000) + (new_FCX & 0xfffff); + /* A[11] = next_pc[31: 0]; */ + env->gpr_a[11] = next_pc; + + /* if (tmp_FCX == LCX) trap(FCD);*/ + if (tmp_FCX == env->LCX) { + /* FCD trap */ + raise_exception_sync_helper(env, TRAPC_CTX_MNG, TIN3_FCD, GETPC()); + } + psw_write(env, psw); +} + +void helper_ret(CPUTriCoreState *env) +{ + target_ulong ea; + target_ulong new_PCXI; + target_ulong new_PSW, psw; + + psw = psw_read(env); + /* if (PSW.CDE) then if (cdc_decrement()) then trap(CDU);*/ + if (psw & MASK_PSW_CDE) { + if (cdc_decrement(&psw)) { + /* CDU trap */ + psw_write(env, psw); + raise_exception_sync_helper(env, TRAPC_CTX_MNG, TIN3_CDU, GETPC()); + } + } + /* if (PCXI[19: 0] == 0) then trap(CSU); */ + if ((env->PCXI & 0xfffff) == 0) { + /* CSU trap */ + psw_write(env, psw); + raise_exception_sync_helper(env, TRAPC_CTX_MNG, TIN3_CSU, GETPC()); + } + /* if (PCXI.UL == 0) then trap(CTYP); */ + if ((env->PCXI & MASK_PCXI_UL) == 0) { + /* CTYP trap */ + cdc_increment(&psw); /* restore to the start of helper */ + psw_write(env, psw); + raise_exception_sync_helper(env, TRAPC_CTX_MNG, TIN3_CTYP, GETPC()); + } + /* PC = {A11 [31: 1], 1’b0}; */ + env->PC = env->gpr_a[11] & 0xfffffffe; + + /* EA = {PCXI.PCXS, 6'b0, PCXI.PCXO, 
6'b0}; */ + ea = ((env->PCXI & MASK_PCXI_PCXS) << 12) + + ((env->PCXI & MASK_PCXI_PCXO) << 6); + /* {new_PCXI, new_PSW, A[10], A[11], D[8], D[9], D[10], D[11], A[12], + A[13], A[14], A[15], D[12], D[13], D[14], D[15]} = M(EA, 16 * word); */ + restore_context_upper(env, ea, &new_PCXI, &new_PSW); + /* M(EA, word) = FCX; */ + cpu_stl_data(env, ea, env->FCX); + /* FCX[19: 0] = PCXI[19: 0]; */ + env->FCX = (env->FCX & 0xfff00000) + (env->PCXI & 0x000fffff); + /* PCXI = new_PCXI; */ + env->PCXI = new_PCXI; + + if (tricore_feature(env, TRICORE_FEATURE_13)) { + /* PSW = new_PSW */ + psw_write(env, new_PSW); + } else { + /* PSW = {new_PSW[31:26], PSW[25:24], new_PSW[23:0]}; */ + psw_write(env, (new_PSW & ~(0x3000000)) + (psw & (0x3000000))); + } +} + +void helper_bisr(CPUTriCoreState *env, uint32_t const9) +{ + target_ulong tmp_FCX; + target_ulong ea; + target_ulong new_FCX; + + if (env->FCX == 0) { + /* FCU trap */ + raise_exception_sync_helper(env, TRAPC_CTX_MNG, TIN3_FCU, GETPC()); + } + + tmp_FCX = env->FCX; + ea = ((env->FCX & 0xf0000) << 12) + ((env->FCX & 0xffff) << 6); + + /* new_FCX = M(EA, word); */ + new_FCX = cpu_ldl_data(env, ea); + /* M(EA, 16 * word) = {PCXI, A[11], A[2], A[3], D[0], D[1], D[2], D[3], A[4] + , A[5], A[6], A[7], D[4], D[5], D[6], D[7]}; */ + save_context_lower(env, ea); + + + /* PCXI.PCPN = ICR.CCPN */ + env->PCXI = (env->PCXI & 0xffffff) + + ((env->ICR & MASK_ICR_CCPN) << 24); + /* PCXI.PIE = ICR.IE */ + env->PCXI = ((env->PCXI & ~MASK_PCXI_PIE_1_3) + + ((env->ICR & MASK_ICR_IE_1_3) << 15)); + /* PCXI.UL = 0 */ + env->PCXI &= ~(MASK_PCXI_UL); + /* PCXI[19: 0] = FCX[19: 0] */ + env->PCXI = (env->PCXI & 0xfff00000) + (env->FCX & 0xfffff); + /* FXC[19: 0] = new_FCX[19: 0] */ + env->FCX = (env->FCX & 0xfff00000) + (new_FCX & 0xfffff); + /* ICR.IE = 1 */ + env->ICR |= MASK_ICR_IE_1_3; + + env->ICR |= const9; /* ICR.CCPN = const9[7: 0];*/ + + if (tmp_FCX == env->LCX) { + /* FCD trap */ + raise_exception_sync_helper(env, TRAPC_CTX_MNG, TIN3_FCD, 
GETPC()); + } +} + +void helper_rfe(CPUTriCoreState *env) +{ + target_ulong ea; + target_ulong new_PCXI; + target_ulong new_PSW; + /* if (PCXI[19: 0] == 0) then trap(CSU); */ + if ((env->PCXI & 0xfffff) == 0) { + /* raise csu trap */ + raise_exception_sync_helper(env, TRAPC_CTX_MNG, TIN3_CSU, GETPC()); + } + /* if (PCXI.UL == 0) then trap(CTYP); */ + if ((env->PCXI & MASK_PCXI_UL) == 0) { + /* raise CTYP trap */ + raise_exception_sync_helper(env, TRAPC_CTX_MNG, TIN3_CTYP, GETPC()); + } + /* if (!cdc_zero() AND PSW.CDE) then trap(NEST); */ + if (!cdc_zero(&(env->PSW)) && (env->PSW & MASK_PSW_CDE)) { + /* raise NEST trap */ + raise_exception_sync_helper(env, TRAPC_CTX_MNG, TIN3_NEST, GETPC()); + } + env->PC = env->gpr_a[11] & ~0x1; + /* ICR.IE = PCXI.PIE; */ + env->ICR = (env->ICR & ~MASK_ICR_IE_1_3) + + ((env->PCXI & MASK_PCXI_PIE_1_3) >> 15); + /* ICR.CCPN = PCXI.PCPN; */ + env->ICR = (env->ICR & ~MASK_ICR_CCPN) + + ((env->PCXI & MASK_PCXI_PCPN) >> 24); + /*EA = {PCXI.PCXS, 6'b0, PCXI.PCXO, 6'b0};*/ + ea = ((env->PCXI & MASK_PCXI_PCXS) << 12) + + ((env->PCXI & MASK_PCXI_PCXO) << 6); + /*{new_PCXI, PSW, A[10], A[11], D[8], D[9], D[10], D[11], A[12], + A[13], A[14], A[15], D[12], D[13], D[14], D[15]} = M(EA, 16 * word); */ + restore_context_upper(env, ea, &new_PCXI, &new_PSW); + /* M(EA, word) = FCX;*/ + cpu_stl_data(env, ea, env->FCX); + /* FCX[19: 0] = PCXI[19: 0]; */ + env->FCX = (env->FCX & 0xfff00000) + (env->PCXI & 0x000fffff); + /* PCXI = new_PCXI; */ + env->PCXI = new_PCXI; + /* write psw */ + psw_write(env, new_PSW); +} + +void helper_rfm(CPUTriCoreState *env) +{ + env->PC = (env->gpr_a[11] & ~0x1); + /* ICR.IE = PCXI.PIE; */ + env->ICR = (env->ICR & ~MASK_ICR_IE_1_3) + | ((env->PCXI & MASK_PCXI_PIE_1_3) >> 15); + /* ICR.CCPN = PCXI.PCPN; */ + env->ICR = (env->ICR & ~MASK_ICR_CCPN) | + ((env->PCXI & MASK_PCXI_PCPN) >> 24); + /* {PCXI, PSW, A[10], A[11]} = M(DCX, 4 * word); */ + env->PCXI = cpu_ldl_data(env, env->DCX); + psw_write(env, cpu_ldl_data(env, 
env->DCX+4)); + env->gpr_a[10] = cpu_ldl_data(env, env->DCX+8); + env->gpr_a[11] = cpu_ldl_data(env, env->DCX+12); + + if (tricore_feature(env, TRICORE_FEATURE_131)) { + env->DBGTCR = 0; + } +} + +void helper_ldlcx(CPUTriCoreState *env, uint32_t ea) +{ + uint32_t dummy; + /* insn doesn't load PCXI and RA */ + restore_context_lower(env, ea, &dummy, &dummy); +} + +void helper_lducx(CPUTriCoreState *env, uint32_t ea) +{ + uint32_t dummy; + /* insn doesn't load PCXI and PSW */ + restore_context_upper(env, ea, &dummy, &dummy); +} + +void helper_stlcx(CPUTriCoreState *env, uint32_t ea) +{ + save_context_lower(env, ea); +} + +void helper_stucx(CPUTriCoreState *env, uint32_t ea) +{ + save_context_upper(env, ea); +} + +void helper_svlcx(CPUTriCoreState *env) +{ + target_ulong tmp_FCX; + target_ulong ea; + target_ulong new_FCX; + + if (env->FCX == 0) { + /* FCU trap */ + raise_exception_sync_helper(env, TRAPC_CTX_MNG, TIN3_FCU, GETPC()); + } + /* tmp_FCX = FCX; */ + tmp_FCX = env->FCX; + /* EA = {FCX.FCXS, 6'b0, FCX.FCXO, 6'b0}; */ + ea = ((env->FCX & MASK_FCX_FCXS) << 12) + + ((env->FCX & MASK_FCX_FCXO) << 6); + /* new_FCX = M(EA, word); */ + new_FCX = cpu_ldl_data(env, ea); + /* M(EA, 16 * word) = {PCXI, PSW, A[10], A[11], D[8], D[9], D[10], D[11], + A[12], A[13], A[14], A[15], D[12], D[13], D[14], + D[15]}; */ + save_context_lower(env, ea); + + /* PCXI.PCPN = ICR.CCPN; */ + env->PCXI = (env->PCXI & 0xffffff) + + ((env->ICR & MASK_ICR_CCPN) << 24); + /* PCXI.PIE = ICR.IE; */ + env->PCXI = ((env->PCXI & ~MASK_PCXI_PIE_1_3) + + ((env->ICR & MASK_ICR_IE_1_3) << 15)); + /* PCXI.UL = 0; */ + env->PCXI &= ~MASK_PCXI_UL; + + /* PCXI[19: 0] = FCX[19: 0]; */ + env->PCXI = (env->PCXI & 0xfff00000) + (env->FCX & 0xfffff); + /* FCX[19: 0] = new_FCX[19: 0]; */ + env->FCX = (env->FCX & 0xfff00000) + (new_FCX & 0xfffff); + + /* if (tmp_FCX == LCX) trap(FCD);*/ + if (tmp_FCX == env->LCX) { + /* FCD trap */ + raise_exception_sync_helper(env, TRAPC_CTX_MNG, TIN3_FCD, GETPC()); + } +} + 
+void helper_svucx(CPUTriCoreState *env) +{ + target_ulong tmp_FCX; + target_ulong ea; + target_ulong new_FCX; + + if (env->FCX == 0) { + /* FCU trap */ + raise_exception_sync_helper(env, TRAPC_CTX_MNG, TIN3_FCU, GETPC()); + } + /* tmp_FCX = FCX; */ + tmp_FCX = env->FCX; + /* EA = {FCX.FCXS, 6'b0, FCX.FCXO, 6'b0}; */ + ea = ((env->FCX & MASK_FCX_FCXS) << 12) + + ((env->FCX & MASK_FCX_FCXO) << 6); + /* new_FCX = M(EA, word); */ + new_FCX = cpu_ldl_data(env, ea); + /* M(EA, 16 * word) = {PCXI, PSW, A[10], A[11], D[8], D[9], D[10], D[11], + A[12], A[13], A[14], A[15], D[12], D[13], D[14], + D[15]}; */ + save_context_upper(env, ea); + + /* PCXI.PCPN = ICR.CCPN; */ + env->PCXI = (env->PCXI & 0xffffff) + + ((env->ICR & MASK_ICR_CCPN) << 24); + /* PCXI.PIE = ICR.IE; */ + env->PCXI = ((env->PCXI & ~MASK_PCXI_PIE_1_3) + + ((env->ICR & MASK_ICR_IE_1_3) << 15)); + /* PCXI.UL = 1; */ + env->PCXI |= MASK_PCXI_UL; + + /* PCXI[19: 0] = FCX[19: 0]; */ + env->PCXI = (env->PCXI & 0xfff00000) + (env->FCX & 0xfffff); + /* FCX[19: 0] = new_FCX[19: 0]; */ + env->FCX = (env->FCX & 0xfff00000) + (new_FCX & 0xfffff); + + /* if (tmp_FCX == LCX) trap(FCD);*/ + if (tmp_FCX == env->LCX) { + /* FCD trap */ + raise_exception_sync_helper(env, TRAPC_CTX_MNG, TIN3_FCD, GETPC()); + } +} + +void helper_rslcx(CPUTriCoreState *env) +{ + target_ulong ea; + target_ulong new_PCXI; + /* if (PCXI[19: 0] == 0) then trap(CSU); */ + if ((env->PCXI & 0xfffff) == 0) { + /* CSU trap */ + raise_exception_sync_helper(env, TRAPC_CTX_MNG, TIN3_CSU, GETPC()); + } + /* if (PCXI.UL == 1) then trap(CTYP); */ + if ((env->PCXI & MASK_PCXI_UL) != 0) { + /* CTYP trap */ + raise_exception_sync_helper(env, TRAPC_CTX_MNG, TIN3_CTYP, GETPC()); + } + /* EA = {PCXI.PCXS, 6'b0, PCXI.PCXO, 6'b0}; */ + ea = ((env->PCXI & MASK_PCXI_PCXS) << 12) + + ((env->PCXI & MASK_PCXI_PCXO) << 6); + /* {new_PCXI, A[11], A[10], A[11], D[8], D[9], D[10], D[11], A[12], + A[13], A[14], A[15], D[12], D[13], D[14], D[15]} = M(EA, 16 * word); */ + 
restore_context_lower(env, ea, &env->gpr_a[11], &new_PCXI); + /* M(EA, word) = FCX; */ + cpu_stl_data(env, ea, env->FCX); + /* M(EA, word) = FCX; */ + cpu_stl_data(env, ea, env->FCX); + /* FCX[19: 0] = PCXI[19: 0]; */ + env->FCX = (env->FCX & 0xfff00000) + (env->PCXI & 0x000fffff); + /* PCXI = new_PCXI; */ + env->PCXI = new_PCXI; +} + +void helper_psw_write(CPUTriCoreState *env, uint32_t arg) +{ + psw_write(env, arg); +} + +uint32_t helper_psw_read(CPUTriCoreState *env) +{ + return psw_read(env); +} diff --git a/qemu/target/tricore/translate.c b/qemu/target/tricore/translate.c new file mode 100644 index 00000000..112f2eb0 --- /dev/null +++ b/qemu/target/tricore/translate.c @@ -0,0 +1,9374 @@ +/* + * TriCore emulation for qemu: main translation routines. + * + * Copyright (c) 2013-2014 Bastian Koppelmann C-Lab/University Paderborn + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . 
+ */ + +/* + Modified for Unicorn Engine by Eric Poole , 2022 + Copyright 2022 Aptiv +*/ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "exec/exec-all.h" +#include "tcg/tcg-op.h" +#include "exec/cpu_ldst.h" +#include "exec/helper-proto.h" +#include "exec/helper-gen.h" +#include "tricore-opcodes.h" +#include "exec/translator.h" +#include "exec/gen-icount.h" + +static const char *regnames_a[] = { + "a0" , "a1" , "a2" , "a3" , "a4" , "a5" , + "a6" , "a7" , "a8" , "a9" , "sp" , "a11" , + "a12" , "a13" , "a14" , "a15", + }; + +static const char *regnames_d[] = { + "d0" , "d1" , "d2" , "d3" , "d4" , "d5" , + "d6" , "d7" , "d8" , "d9" , "d10" , "d11" , + "d12" , "d13" , "d14" , "d15", + }; + +typedef struct DisasContext { + DisasContextBase base; + CPUTriCoreState *env; + target_ulong pc; + // CCOp cc_op; /* Current CC operation */ + target_ulong pc_succ_insn; + uint32_t opcode; + /* Routine used to access memory */ + int mem_idx; + uint32_t hflags, saved_hflags; + uint64_t features; + + // Unicorn + struct uc_struct *uc; +} DisasContext; + +static int has_feature(DisasContext *ctx, int feature) +{ + return (ctx->features & (1ULL << feature)) != 0; +} + +enum { + MODE_LL = 0, + MODE_LU = 1, + MODE_UL = 2, + MODE_UU = 3, +}; + +/* + * Functions to generate micro-ops + */ + +/* Makros for generating helpers */ + +#define gen_helper_1arg(tcg_ctx, name, arg) do { \ + TCGv_i32 helper_tmp = tcg_const_i32(tcg_ctx, arg); \ + gen_helper_##name(tcg_ctx, tcg_ctx->cpu_env, helper_tmp); \ + tcg_temp_free_i32(tcg_ctx, helper_tmp); \ + } while (0) + +#define GEN_HELPER_LL(tcg_ctx, name, ret, arg0, arg1, n) do { \ + TCGv arg00 = tcg_temp_new(tcg_ctx); \ + TCGv arg01 = tcg_temp_new(tcg_ctx); \ + TCGv arg11 = tcg_temp_new(tcg_ctx); \ + tcg_gen_sari_tl(tcg_ctx, arg00, arg0, 16); \ + tcg_gen_ext16s_tl(tcg_ctx, arg01, arg0); \ + tcg_gen_ext16s_tl(tcg_ctx, arg11, arg1); \ + gen_helper_##name(tcg_ctx, ret, arg00, arg01, arg11, arg11, n); \ + tcg_temp_free(tcg_ctx, arg00); \ + 
tcg_temp_free(tcg_ctx, arg01); \ + tcg_temp_free(tcg_ctx, arg11); \ +} while (0) + +#define GEN_HELPER_LU(tcg_ctx, name, ret, arg0, arg1, n) do { \ + TCGv arg00 = tcg_temp_new(tcg_ctx); \ + TCGv arg01 = tcg_temp_new(tcg_ctx); \ + TCGv arg10 = tcg_temp_new(tcg_ctx); \ + TCGv arg11 = tcg_temp_new(tcg_ctx); \ + tcg_gen_sari_tl(tcg_ctx, arg00, arg0, 16); \ + tcg_gen_ext16s_tl(tcg_ctx, arg01, arg0); \ + tcg_gen_sari_tl(tcg_ctx, arg11, arg1, 16); \ + tcg_gen_ext16s_tl(tcg_ctx, arg10, arg1); \ + gen_helper_##name(tcg_ctx, ret, arg00, arg01, arg10, arg11, n); \ + tcg_temp_free(tcg_ctx, arg00); \ + tcg_temp_free(tcg_ctx, arg01); \ + tcg_temp_free(tcg_ctx, arg10); \ + tcg_temp_free(tcg_ctx, arg11); \ +} while (0) + +#define GEN_HELPER_UL(tcg_ctx, name, ret, arg0, arg1, n) do { \ + TCGv arg00 = tcg_temp_new(tcg_ctx); \ + TCGv arg01 = tcg_temp_new(tcg_ctx); \ + TCGv arg10 = tcg_temp_new(tcg_ctx); \ + TCGv arg11 = tcg_temp_new(tcg_ctx); \ + tcg_gen_sari_tl(tcg_ctx, arg00, arg0, 16); \ + tcg_gen_ext16s_tl(tcg_ctx, arg01, arg0); \ + tcg_gen_sari_tl(tcg_ctx, arg10, arg1, 16); \ + tcg_gen_ext16s_tl(tcg_ctx, arg11, arg1); \ + gen_helper_##name(tcg_ctx, ret, arg00, arg01, arg10, arg11, n); \ + tcg_temp_free(tcg_ctx, arg00); \ + tcg_temp_free(tcg_ctx, arg01); \ + tcg_temp_free(tcg_ctx, arg10); \ + tcg_temp_free(tcg_ctx, arg11); \ +} while (0) + +#define GEN_HELPER_UU(tcg_ctx, name, ret, arg0, arg1, n) do { \ + TCGv arg00 = tcg_temp_new(tcg_ctx); \ + TCGv arg01 = tcg_temp_new(tcg_ctx); \ + TCGv arg11 = tcg_temp_new(tcg_ctx); \ + tcg_gen_sari_tl(tcg_ctx, arg01, arg0, 16); \ + tcg_gen_ext16s_tl(tcg_ctx, arg00, arg0); \ + tcg_gen_sari_tl(tcg_ctx, arg11, arg1, 16); \ + gen_helper_##name(tcg_ctx, ret, arg00, arg01, arg11, arg11, n); \ + tcg_temp_free(tcg_ctx, arg00); \ + tcg_temp_free(tcg_ctx, arg01); \ + tcg_temp_free(tcg_ctx, arg11); \ +} while (0) + +#define GEN_HELPER_RRR(tcg_ctx, name, rl, rh, al1, ah1, arg2) do { \ + TCGv_i64 ret = tcg_temp_new_i64(tcg_ctx); \ + TCGv_i64 arg1 = 
tcg_temp_new_i64(tcg_ctx); \ + \ + tcg_gen_concat_i32_i64(tcg_ctx, arg1, al1, ah1); \ + gen_helper_##name(tcg_ctx, ret, arg1, arg2); \ + tcg_gen_extr_i64_i32(tcg_ctx, rl, rh, ret); \ + \ + tcg_temp_free_i64(tcg_ctx, ret); \ + tcg_temp_free_i64(tcg_ctx, arg1); \ +} while (0) + +#define GEN_HELPER_RR(tcg_ctx, name, rl, rh, arg1, arg2) do { \ + TCGv_i64 ret = tcg_temp_new_i64(tcg_ctx); \ + \ + gen_helper_##name(tcg_ctx, ret, tcg_ctx->cpu_env, arg1, arg2); \ + tcg_gen_extr_i64_i32(tcg_ctx, rl, rh, ret); \ + \ + tcg_temp_free_i64(tcg_ctx, ret); \ +} while (0) + +#define EA_ABS_FORMAT(con) (((con & 0x3C000) << 14) + (con & 0x3FFF)) +#define EA_B_ABSOLUT(con) (((offset & 0xf00000) << 8) | \ + ((offset & 0x0fffff) << 1)) + +/* For two 32-bit registers used a 64-bit register, the first + registernumber needs to be even. Otherwise we trap. */ +static inline void generate_trap(DisasContext *ctx, int class, int tin); +#define CHECK_REG_PAIR(reg) do { \ + if (reg & 0x1) { \ + generate_trap(ctx, TRAPC_INSN_ERR, TIN2_OPD); \ + } \ +} while (0) + +/* Functions for load/save to/from memory */ + +static inline void gen_offset_ld(DisasContext *ctx, TCGv r1, TCGv r2, + int16_t con, MemOp mop) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_temp_new(tcg_ctx); + tcg_gen_addi_tl(tcg_ctx, temp, r2, con); + tcg_gen_qemu_ld_tl(tcg_ctx, r1, temp, ctx->mem_idx, mop); + tcg_temp_free(tcg_ctx, temp); +} + +static inline void gen_offset_st(DisasContext *ctx, TCGv r1, TCGv r2, + int16_t con, MemOp mop) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_temp_new(tcg_ctx); + tcg_gen_addi_tl(tcg_ctx, temp, r2, con); + tcg_gen_qemu_st_tl(tcg_ctx, r1, temp, ctx->mem_idx, mop); + tcg_temp_free(tcg_ctx, temp); +} + +static void gen_st_2regs_64(DisasContext *ctx, TCGv rh, TCGv rl, TCGv address) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv_i64 temp = tcg_temp_new_i64(tcg_ctx); + + tcg_gen_concat_i32_i64(tcg_ctx, temp, rl, rh); + // tcg_gen_qemu_st_i64(tcg_ctx, temp, 
address, ctx->mem_idx, MO_LEUQ); + tcg_gen_qemu_st_i64(tcg_ctx, temp, address, ctx->mem_idx, MO_LE | MO_Q); + + tcg_temp_free_i64(tcg_ctx, temp); +} + +static void gen_offset_st_2regs(DisasContext *ctx, TCGv rh, TCGv rl, TCGv base, int16_t con) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_temp_new(tcg_ctx); + tcg_gen_addi_tl(tcg_ctx, temp, base, con); + gen_st_2regs_64(ctx, rh, rl, temp); + tcg_temp_free(tcg_ctx, temp); +} + +static void gen_ld_2regs_64(DisasContext *ctx, TCGv rh, TCGv rl, TCGv address) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv_i64 temp = tcg_temp_new_i64(tcg_ctx); + + // tcg_gen_qemu_ld_i64(tcg_ctx, temp, address, ctx->mem_idx, MO_LEUQ); + tcg_gen_qemu_ld_i64(tcg_ctx, temp, address, ctx->mem_idx, MO_LE | MO_Q); + /* write back to two 32 bit regs */ + tcg_gen_extr_i64_i32(tcg_ctx, rl, rh, temp); + + tcg_temp_free_i64(tcg_ctx, temp); +} + +static void gen_offset_ld_2regs(DisasContext *ctx, TCGv rh, TCGv rl, TCGv base, int16_t con) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_temp_new(tcg_ctx); + tcg_gen_addi_tl(tcg_ctx, temp, base, con); + gen_ld_2regs_64(ctx, rh, rl, temp); + tcg_temp_free(tcg_ctx, temp); +} + +static void gen_st_preincr(DisasContext *ctx, TCGv r1, TCGv r2, int16_t off, + MemOp mop) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_temp_new(tcg_ctx); + tcg_gen_addi_tl(tcg_ctx, temp, r2, off); + tcg_gen_qemu_st_tl(tcg_ctx, r1, temp, ctx->mem_idx, mop); + tcg_gen_mov_tl(tcg_ctx, r2, temp); + tcg_temp_free(tcg_ctx, temp); +} + +static void gen_ld_preincr(DisasContext *ctx, TCGv r1, TCGv r2, int16_t off, + MemOp mop) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_temp_new(tcg_ctx); + tcg_gen_addi_tl(tcg_ctx, temp, r2, off); + tcg_gen_qemu_ld_tl(tcg_ctx, r1, temp, ctx->mem_idx, mop); + tcg_gen_mov_tl(tcg_ctx, r2, temp); + tcg_temp_free(tcg_ctx, temp); +} + +/* M(EA, word) = (M(EA, word) & ~E[a][63:32]) | (E[a][31:0] & E[a][63:32]); */ +static void 
gen_ldmst(DisasContext *ctx, int ereg, TCGv ea) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_temp_new(tcg_ctx); + TCGv temp2 = tcg_temp_new(tcg_ctx); + + CHECK_REG_PAIR(ereg); + /* temp = (M(EA, word) */ + tcg_gen_qemu_ld_tl(tcg_ctx, temp, ea, ctx->mem_idx, MO_LEUL); + /* temp = temp & ~E[a][63:32]) */ + tcg_gen_andc_tl(tcg_ctx, temp, temp, tcg_ctx->cpu_gpr_d[ereg+1]); + /* temp2 = (E[a][31:0] & E[a][63:32]); */ + tcg_gen_and_tl(tcg_ctx, temp2, tcg_ctx->cpu_gpr_d[ereg], tcg_ctx->cpu_gpr_d[ereg+1]); + /* temp = temp | temp2; */ + tcg_gen_or_tl(tcg_ctx, temp, temp, temp2); + /* M(EA, word) = temp; */ + tcg_gen_qemu_st_tl(tcg_ctx, temp, ea, ctx->mem_idx, MO_LEUL); + + tcg_temp_free(tcg_ctx, temp); + tcg_temp_free(tcg_ctx, temp2); +} + +/* tmp = M(EA, word); + M(EA, word) = D[a]; + D[a] = tmp[31:0];*/ +static void gen_swap(DisasContext *ctx, int reg, TCGv ea) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_temp_new(tcg_ctx); + + tcg_gen_qemu_ld_tl(tcg_ctx, temp, ea, ctx->mem_idx, MO_LEUL); + tcg_gen_qemu_st_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[reg], ea, ctx->mem_idx, MO_LEUL); + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[reg], temp); + + tcg_temp_free(tcg_ctx, temp); +} + +static void gen_cmpswap(DisasContext *ctx, int reg, TCGv ea) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_temp_new(tcg_ctx); + TCGv temp2 = tcg_temp_new(tcg_ctx); + tcg_gen_qemu_ld_tl(tcg_ctx, temp, ea, ctx->mem_idx, MO_LEUL); + tcg_gen_movcond_tl(tcg_ctx, TCG_COND_EQ, temp2, tcg_ctx->cpu_gpr_d[reg+1], temp, + tcg_ctx->cpu_gpr_d[reg], temp); + tcg_gen_qemu_st_tl(tcg_ctx, temp2, ea, ctx->mem_idx, MO_LEUL); + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[reg], temp); + + tcg_temp_free(tcg_ctx, temp); + tcg_temp_free(tcg_ctx, temp2); +} + +static void gen_swapmsk(DisasContext *ctx, int reg, TCGv ea) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_temp_new(tcg_ctx); + TCGv temp2 = tcg_temp_new(tcg_ctx); + TCGv temp3 = tcg_temp_new(tcg_ctx); 
+ + tcg_gen_qemu_ld_tl(tcg_ctx, temp, ea, ctx->mem_idx, MO_LEUL); + tcg_gen_and_tl(tcg_ctx, temp2, tcg_ctx->cpu_gpr_d[reg], tcg_ctx->cpu_gpr_d[reg+1]); + tcg_gen_andc_tl(tcg_ctx, temp3, temp, tcg_ctx->cpu_gpr_d[reg+1]); + tcg_gen_or_tl(tcg_ctx, temp2, temp2, temp3); + tcg_gen_qemu_st_tl(tcg_ctx, temp2, ea, ctx->mem_idx, MO_LEUL); + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[reg], temp); + + tcg_temp_free(tcg_ctx, temp); + tcg_temp_free(tcg_ctx, temp2); + tcg_temp_free(tcg_ctx, temp3); +} + + +/* We generate loads and store to core special function register (csfr) through + the function gen_mfcr and gen_mtcr. To handle access permissions, we use 3 + makros R, A and E, which allow read-only, all and endinit protected access. + These makros also specify in which ISA version the csfr was introduced. */ +#define R(ADDRESS, REG, FEATURE) \ + case ADDRESS: \ + if (has_feature(ctx, FEATURE)) { \ + tcg_gen_ld_tl(tcg_ctx, ret, tcg_ctx->cpu_env, offsetof(CPUTriCoreState, REG)); \ + } \ + break; +#define A(ADDRESS, REG, FEATURE) R(ADDRESS, REG, FEATURE) +#define E(ADDRESS, REG, FEATURE) R(ADDRESS, REG, FEATURE) +static inline void gen_mfcr(DisasContext *ctx, TCGv ret, int32_t offset) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + /* since we're caching PSW make this a special case */ + if (offset == 0xfe04) { + gen_helper_psw_read(tcg_ctx, ret, tcg_ctx->cpu_env); + } else { + switch (offset) { +#include "csfr.def" + } + } +} +#undef R +#undef A +#undef E + +#define R(ADDRESS, REG, FEATURE) /* don't gen writes to read-only reg, + since no execption occurs */ +#define A(ADDRESS, REG, FEATURE) R(ADDRESS, REG, FEATURE) \ + case ADDRESS: \ + if (has_feature(ctx, FEATURE)) { \ + tcg_gen_st_tl(tcg_ctx, r1, tcg_ctx->cpu_env, offsetof(CPUTriCoreState, REG)); \ + } \ + break; +/* Endinit protected registers + TODO: Since the endinit bit is in a register of a not yet implemented + watchdog device, we handle endinit protected registers like + all-access registers for now. 
*/ +#define E(ADDRESS, REG, FEATURE) A(ADDRESS, REG, FEATURE) +static inline void gen_mtcr(DisasContext *ctx, TCGv r1, + int32_t offset) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + if ((ctx->hflags & TRICORE_HFLAG_KUU) == TRICORE_HFLAG_SM) { + /* since we're caching PSW make this a special case */ + if (offset == 0xfe04) { + gen_helper_psw_write(tcg_ctx, tcg_ctx->cpu_env, r1); + } else { + switch (offset) { +#include "csfr.def" + } + } + } else { + /* generate privilege trap */ + } +} + +/* Functions for arithmetic instructions */ + +static inline void gen_add_d(DisasContext *ctx, TCGv ret, TCGv r1, TCGv r2) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv t0 = tcg_temp_new_i32(tcg_ctx); + TCGv result = tcg_temp_new_i32(tcg_ctx); + /* Addition and set V/SV bits */ + tcg_gen_add_tl(tcg_ctx, result, r1, r2); + /* calc V bit */ + tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, result, r1); + tcg_gen_xor_tl(tcg_ctx, t0, r1, r2); + tcg_gen_andc_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, tcg_ctx->cpu_PSW_V, t0); + /* Calc SV bit */ + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_V); + /* Calc AV/SAV bits */ + tcg_gen_add_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, result, result); + tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, result, tcg_ctx->cpu_PSW_AV); + /* calc SAV */ + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_AV); + /* write back result */ + tcg_gen_mov_tl(tcg_ctx, ret, result); + + tcg_temp_free(tcg_ctx, result); + tcg_temp_free(tcg_ctx, t0); +} + +static inline void +gen_add64_d(DisasContext *ctx, TCGv_i64 ret, TCGv_i64 r1, TCGv_i64 r2) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_temp_new(tcg_ctx); + TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 result = tcg_temp_new_i64(tcg_ctx); + + tcg_gen_add_i64(tcg_ctx, result, r1, r2); + /* calc v bit */ + tcg_gen_xor_i64(tcg_ctx, t1, result, r1); + tcg_gen_xor_i64(tcg_ctx, t0, r1, r2); + 
tcg_gen_andc_i64(tcg_ctx, t1, t1, t0); + tcg_gen_extrh_i64_i32(tcg_ctx, tcg_ctx->cpu_PSW_V, t1); + /* calc SV bit */ + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_V); + /* calc AV/SAV bits */ + tcg_gen_extrh_i64_i32(tcg_ctx, temp, result); + tcg_gen_add_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, temp, temp); + tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, temp, tcg_ctx->cpu_PSW_AV); + /* calc SAV */ + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_AV); + /* write back result */ + tcg_gen_mov_i64(tcg_ctx, ret, result); + + tcg_temp_free(tcg_ctx, temp); + tcg_temp_free_i64(tcg_ctx, result); + tcg_temp_free_i64(tcg_ctx, t0); + tcg_temp_free_i64(tcg_ctx, t1); +} + +static inline void +gen_addsub64_h(DisasContext *ctx, TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, + TCGv r3, void(*op1)(TCGContext*, TCGv, TCGv, TCGv), + void(*op2)(TCGContext*, TCGv, TCGv, TCGv)) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_temp_new(tcg_ctx); + TCGv temp2 = tcg_temp_new(tcg_ctx); + TCGv temp3 = tcg_temp_new(tcg_ctx); + TCGv temp4 = tcg_temp_new(tcg_ctx); + + (*op1)(tcg_ctx, temp, r1_low, r2); + /* calc V0 bit */ + tcg_gen_xor_tl(tcg_ctx, temp2, temp, r1_low); + tcg_gen_xor_tl(tcg_ctx, temp3, r1_low, r2); + if (op1 == tcg_gen_add_tl) { + tcg_gen_andc_tl(tcg_ctx, temp2, temp2, temp3); + } else { + tcg_gen_and_tl(tcg_ctx, temp2, temp2, temp3); + } + + (*op2)(tcg_ctx, temp3, r1_high, r3); + /* calc V1 bit */ + tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, temp3, r1_high); + tcg_gen_xor_tl(tcg_ctx, temp4, r1_high, r3); + if (op2 == tcg_gen_add_tl) { + tcg_gen_andc_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, tcg_ctx->cpu_PSW_V, temp4); + } else { + tcg_gen_and_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, tcg_ctx->cpu_PSW_V, temp4); + } + /* combine V0/V1 bits */ + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, tcg_ctx->cpu_PSW_V, temp2); + /* calc sv bit */ + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_SV, 
tcg_ctx->cpu_PSW_V); + /* write result */ + tcg_gen_mov_tl(tcg_ctx, ret_low, temp); + tcg_gen_mov_tl(tcg_ctx, ret_high, temp3); + /* calc AV bit */ + tcg_gen_add_tl(tcg_ctx, temp, ret_low, ret_low); + tcg_gen_xor_tl(tcg_ctx, temp, temp, ret_low); + tcg_gen_add_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, ret_high, ret_high); + tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, tcg_ctx->cpu_PSW_AV, ret_high); + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, tcg_ctx->cpu_PSW_AV, temp); + /* calc SAV bit */ + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_AV); + + tcg_temp_free(tcg_ctx, temp); + tcg_temp_free(tcg_ctx, temp2); + tcg_temp_free(tcg_ctx, temp3); + tcg_temp_free(tcg_ctx, temp4); +} + +/* ret = r2 + (r1 * r3); */ +static inline void gen_madd32_d(DisasContext *ctx, TCGv ret, TCGv r1, TCGv r2, TCGv r3) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t3 = tcg_temp_new_i64(tcg_ctx); + + tcg_gen_ext_i32_i64(tcg_ctx, t1, r1); + tcg_gen_ext_i32_i64(tcg_ctx, t2, r2); + tcg_gen_ext_i32_i64(tcg_ctx, t3, r3); + + tcg_gen_mul_i64(tcg_ctx, t1, t1, t3); + tcg_gen_add_i64(tcg_ctx, t1, t2, t1); + + tcg_gen_extrl_i64_i32(tcg_ctx, ret, t1); + /* calc V + t1 > 0x7fffffff */ + tcg_gen_setcondi_i64(tcg_ctx, TCG_COND_GT, t3, t1, 0x7fffffffLL); + /* t1 < -0x80000000 */ + tcg_gen_setcondi_i64(tcg_ctx, TCG_COND_LT, t2, t1, -0x80000000LL); + tcg_gen_or_i64(tcg_ctx, t2, t2, t3); + tcg_gen_extrl_i64_i32(tcg_ctx, tcg_ctx->cpu_PSW_V, t2); + tcg_gen_shli_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, tcg_ctx->cpu_PSW_V, 31); + /* Calc SV bit */ + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_V); + /* Calc AV/SAV bits */ + tcg_gen_add_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, ret, ret); + tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, ret, tcg_ctx->cpu_PSW_AV); + /* calc SAV */ + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_AV); + 
+ tcg_temp_free_i64(tcg_ctx, t1); + tcg_temp_free_i64(tcg_ctx, t2); + tcg_temp_free_i64(tcg_ctx, t3); +} + +static inline void gen_maddi32_d(DisasContext *ctx, TCGv ret, TCGv r1, TCGv r2, int32_t con) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_const_i32(tcg_ctx, con); + gen_madd32_d(ctx, ret, r1, r2, temp); + tcg_temp_free(tcg_ctx, temp); +} + +static inline void +gen_madd64_d(DisasContext *ctx, TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, + TCGv r3) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv t1 = tcg_temp_new(tcg_ctx); + TCGv t2 = tcg_temp_new(tcg_ctx); + TCGv t3 = tcg_temp_new(tcg_ctx); + TCGv t4 = tcg_temp_new(tcg_ctx); + + tcg_gen_muls2_tl(tcg_ctx, t1, t2, r1, r3); + /* only the add can overflow */ + tcg_gen_add2_tl(tcg_ctx, t3, t4, r2_low, r2_high, t1, t2); + /* calc V bit */ + tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, t4, r2_high); + tcg_gen_xor_tl(tcg_ctx, t1, r2_high, t2); + tcg_gen_andc_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, tcg_ctx->cpu_PSW_V, t1); + /* Calc SV bit */ + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_V); + /* Calc AV/SAV bits */ + tcg_gen_add_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, t4, t4); + tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, t4, tcg_ctx->cpu_PSW_AV); + /* calc SAV */ + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_AV); + /* write back the result */ + tcg_gen_mov_tl(tcg_ctx, ret_low, t3); + tcg_gen_mov_tl(tcg_ctx, ret_high, t4); + + tcg_temp_free(tcg_ctx, t1); + tcg_temp_free(tcg_ctx, t2); + tcg_temp_free(tcg_ctx, t3); + tcg_temp_free(tcg_ctx, t4); +} + +static inline void +gen_maddu64_d(DisasContext *ctx, TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, + TCGv r3) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t3 = tcg_temp_new_i64(tcg_ctx); + + tcg_gen_extu_i32_i64(tcg_ctx, t1, r1); + 
tcg_gen_concat_i32_i64(tcg_ctx, t2, r2_low, r2_high); + tcg_gen_extu_i32_i64(tcg_ctx, t3, r3); + + tcg_gen_mul_i64(tcg_ctx, t1, t1, t3); + tcg_gen_add_i64(tcg_ctx, t2, t2, t1); + /* write back result */ + tcg_gen_extr_i64_i32(tcg_ctx, ret_low, ret_high, t2); + /* only the add overflows, if t2 < t1 + calc V bit */ + tcg_gen_setcond_i64(tcg_ctx, TCG_COND_LTU, t2, t2, t1); + tcg_gen_extrl_i64_i32(tcg_ctx, tcg_ctx->cpu_PSW_V, t2); + tcg_gen_shli_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, tcg_ctx->cpu_PSW_V, 31); + /* Calc SV bit */ + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_V); + /* Calc AV/SAV bits */ + tcg_gen_add_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, ret_high, ret_high); + tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, ret_high, tcg_ctx->cpu_PSW_AV); + /* calc SAV */ + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_AV); + + tcg_temp_free_i64(tcg_ctx, t1); + tcg_temp_free_i64(tcg_ctx, t2); + tcg_temp_free_i64(tcg_ctx, t3); +} + +static inline void +gen_maddi64_d(DisasContext *ctx, TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, + int32_t con) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_const_i32(tcg_ctx, con); + gen_madd64_d(ctx, ret_low, ret_high, r1, r2_low, r2_high, temp); + tcg_temp_free(tcg_ctx, temp); +} + +static inline void +gen_maddui64_d(DisasContext *ctx, TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, + int32_t con) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_const_i32(tcg_ctx, con); + gen_maddu64_d(ctx, ret_low, ret_high, r1, r2_low, r2_high, temp); + tcg_temp_free(tcg_ctx, temp); +} + +static inline void +gen_madd_h(DisasContext *ctx, TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, + TCGv r3, uint32_t n, uint32_t mode) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_const_i32(tcg_ctx, n); + TCGv temp2 = tcg_temp_new(tcg_ctx); + TCGv_i64 temp64 = tcg_temp_new_i64(tcg_ctx); + switch 
(mode) { + case MODE_LL: + GEN_HELPER_LL(tcg_ctx, mul_h, temp64, r2, r3, temp); + break; + case MODE_LU: + GEN_HELPER_LU(tcg_ctx, mul_h, temp64, r2, r3, temp); + break; + case MODE_UL: + GEN_HELPER_UL(tcg_ctx, mul_h, temp64, r2, r3, temp); + break; + case MODE_UU: + GEN_HELPER_UU(tcg_ctx, mul_h, temp64, r2, r3, temp); + break; + } + tcg_gen_extr_i64_i32(tcg_ctx, temp, temp2, temp64); + gen_addsub64_h(ctx, ret_low, ret_high, r1_low, r1_high, temp, temp2, + tcg_gen_add_tl, tcg_gen_add_tl); + tcg_temp_free(tcg_ctx, temp); + tcg_temp_free(tcg_ctx, temp2); + tcg_temp_free_i64(tcg_ctx, temp64); +} + +static inline void +gen_maddsu_h(DisasContext *ctx, TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, + TCGv r3, uint32_t n, uint32_t mode) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_const_i32(tcg_ctx, n); + TCGv temp2 = tcg_temp_new(tcg_ctx); + TCGv_i64 temp64 = tcg_temp_new_i64(tcg_ctx); + switch (mode) { + case MODE_LL: + GEN_HELPER_LL(tcg_ctx, mul_h, temp64, r2, r3, temp); + break; + case MODE_LU: + GEN_HELPER_LU(tcg_ctx, mul_h, temp64, r2, r3, temp); + break; + case MODE_UL: + GEN_HELPER_UL(tcg_ctx, mul_h, temp64, r2, r3, temp); + break; + case MODE_UU: + GEN_HELPER_UU(tcg_ctx, mul_h, temp64, r2, r3, temp); + break; + } + tcg_gen_extr_i64_i32(tcg_ctx, temp, temp2, temp64); + gen_addsub64_h(ctx, ret_low, ret_high, r1_low, r1_high, temp, temp2, + tcg_gen_sub_tl, tcg_gen_add_tl); + tcg_temp_free(tcg_ctx, temp); + tcg_temp_free(tcg_ctx, temp2); + tcg_temp_free_i64(tcg_ctx, temp64); +} + +static inline void +gen_maddsum_h(DisasContext *ctx, TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, + TCGv r3, uint32_t n, uint32_t mode) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_const_i32(tcg_ctx, n); + TCGv_i64 temp64 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 temp64_2 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 temp64_3 = tcg_temp_new_i64(tcg_ctx); + switch (mode) { + case MODE_LL: + GEN_HELPER_LL(tcg_ctx, mul_h, 
temp64, r2, r3, temp); + break; + case MODE_LU: + GEN_HELPER_LU(tcg_ctx, mul_h, temp64, r2, r3, temp); + break; + case MODE_UL: + GEN_HELPER_UL(tcg_ctx, mul_h, temp64, r2, r3, temp); + break; + case MODE_UU: + GEN_HELPER_UU(tcg_ctx, mul_h, temp64, r2, r3, temp); + break; + } + tcg_gen_concat_i32_i64(tcg_ctx, temp64_3, r1_low, r1_high); + tcg_gen_sari_i64(tcg_ctx, temp64_2, temp64, 32); /* high */ + tcg_gen_ext32s_i64(tcg_ctx, temp64, temp64); /* low */ + tcg_gen_sub_i64(tcg_ctx, temp64, temp64_2, temp64); + tcg_gen_shli_i64(tcg_ctx, temp64, temp64, 16); + + gen_add64_d(ctx, temp64_2, temp64_3, temp64); + /* write back result */ + tcg_gen_extr_i64_i32(tcg_ctx, ret_low, ret_high, temp64_2); + + tcg_temp_free(tcg_ctx, temp); + tcg_temp_free_i64(tcg_ctx, temp64); + tcg_temp_free_i64(tcg_ctx, temp64_2); + tcg_temp_free_i64(tcg_ctx, temp64_3); +} + +static inline void gen_adds(DisasContext *ctx, TCGv ret, TCGv r1, TCGv r2); + +static inline void +gen_madds_h(DisasContext *ctx, TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, + TCGv r3, uint32_t n, uint32_t mode) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_const_i32(tcg_ctx, n); + TCGv temp2 = tcg_temp_new(tcg_ctx); + TCGv temp3 = tcg_temp_new(tcg_ctx); + TCGv_i64 temp64 = tcg_temp_new_i64(tcg_ctx); + + switch (mode) { + case MODE_LL: + GEN_HELPER_LL(tcg_ctx, mul_h, temp64, r2, r3, temp); + break; + case MODE_LU: + GEN_HELPER_LU(tcg_ctx, mul_h, temp64, r2, r3, temp); + break; + case MODE_UL: + GEN_HELPER_UL(tcg_ctx, mul_h, temp64, r2, r3, temp); + break; + case MODE_UU: + GEN_HELPER_UU(tcg_ctx, mul_h, temp64, r2, r3, temp); + break; + } + tcg_gen_extr_i64_i32(tcg_ctx, temp, temp2, temp64); + gen_adds(ctx, ret_low, r1_low, temp); + tcg_gen_mov_tl(tcg_ctx, temp, tcg_ctx->cpu_PSW_V); + tcg_gen_mov_tl(tcg_ctx, temp3, tcg_ctx->cpu_PSW_AV); + gen_adds(ctx, ret_high, r1_high, temp2); + /* combine v bits */ + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, tcg_ctx->cpu_PSW_V, temp); + /* combine av 
bits */ + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, tcg_ctx->cpu_PSW_AV, temp3); + + tcg_temp_free(tcg_ctx, temp); + tcg_temp_free(tcg_ctx, temp2); + tcg_temp_free(tcg_ctx, temp3); + tcg_temp_free_i64(tcg_ctx, temp64); + +} + +static inline void gen_subs(DisasContext *ctx, TCGv ret, TCGv r1, TCGv r2); + +static inline void +gen_maddsus_h(DisasContext *ctx, TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, + TCGv r3, uint32_t n, uint32_t mode) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_const_i32(tcg_ctx, n); + TCGv temp2 = tcg_temp_new(tcg_ctx); + TCGv temp3 = tcg_temp_new(tcg_ctx); + TCGv_i64 temp64 = tcg_temp_new_i64(tcg_ctx); + + switch (mode) { + case MODE_LL: + GEN_HELPER_LL(tcg_ctx, mul_h, temp64, r2, r3, temp); + break; + case MODE_LU: + GEN_HELPER_LU(tcg_ctx, mul_h, temp64, r2, r3, temp); + break; + case MODE_UL: + GEN_HELPER_UL(tcg_ctx, mul_h, temp64, r2, r3, temp); + break; + case MODE_UU: + GEN_HELPER_UU(tcg_ctx, mul_h, temp64, r2, r3, temp); + break; + } + tcg_gen_extr_i64_i32(tcg_ctx, temp, temp2, temp64); + gen_subs(ctx, ret_low, r1_low, temp); + tcg_gen_mov_tl(tcg_ctx, temp, tcg_ctx->cpu_PSW_V); + tcg_gen_mov_tl(tcg_ctx, temp3, tcg_ctx->cpu_PSW_AV); + gen_adds(ctx, ret_high, r1_high, temp2); + /* combine v bits */ + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, tcg_ctx->cpu_PSW_V, temp); + /* combine av bits */ + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, tcg_ctx->cpu_PSW_AV, temp3); + + tcg_temp_free(tcg_ctx, temp); + tcg_temp_free(tcg_ctx, temp2); + tcg_temp_free(tcg_ctx, temp3); + tcg_temp_free_i64(tcg_ctx, temp64); + +} + +static inline void +gen_maddsums_h(DisasContext *ctx, TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, + TCGv r3, uint32_t n, uint32_t mode) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_const_i32(tcg_ctx, n); + TCGv_i64 temp64 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 temp64_2 = tcg_temp_new_i64(tcg_ctx); + + switch (mode) { + case MODE_LL: + 
GEN_HELPER_LL(tcg_ctx, mul_h, temp64, r2, r3, temp); + break; + case MODE_LU: + GEN_HELPER_LU(tcg_ctx, mul_h, temp64, r2, r3, temp); + break; + case MODE_UL: + GEN_HELPER_UL(tcg_ctx, mul_h, temp64, r2, r3, temp); + break; + case MODE_UU: + GEN_HELPER_UU(tcg_ctx, mul_h, temp64, r2, r3, temp); + break; + } + tcg_gen_sari_i64(tcg_ctx, temp64_2, temp64, 32); /* high */ + tcg_gen_ext32s_i64(tcg_ctx, temp64, temp64); /* low */ + tcg_gen_sub_i64(tcg_ctx, temp64, temp64_2, temp64); + tcg_gen_shli_i64(tcg_ctx, temp64, temp64, 16); + tcg_gen_concat_i32_i64(tcg_ctx, temp64_2, r1_low, r1_high); + + gen_helper_add64_ssov(tcg_ctx, temp64, tcg_ctx->cpu_env, temp64_2, temp64); + tcg_gen_extr_i64_i32(tcg_ctx, ret_low, ret_high, temp64); + + tcg_temp_free(tcg_ctx, temp); + tcg_temp_free_i64(tcg_ctx, temp64); + tcg_temp_free_i64(tcg_ctx, temp64_2); +} + + +static inline void +gen_maddm_h(DisasContext *ctx, TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, + TCGv r3, uint32_t n, uint32_t mode) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_const_i32(tcg_ctx, n); + TCGv_i64 temp64 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 temp64_2 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 temp64_3 = tcg_temp_new_i64(tcg_ctx); + switch (mode) { + case MODE_LL: + GEN_HELPER_LL(tcg_ctx, mulm_h, temp64, r2, r3, temp); + break; + case MODE_LU: + GEN_HELPER_LU(tcg_ctx, mulm_h, temp64, r2, r3, temp); + break; + case MODE_UL: + GEN_HELPER_UL(tcg_ctx, mulm_h, temp64, r2, r3, temp); + break; + case MODE_UU: + GEN_HELPER_UU(tcg_ctx, mulm_h, temp64, r2, r3, temp); + break; + } + tcg_gen_concat_i32_i64(tcg_ctx, temp64_2, r1_low, r1_high); + gen_add64_d(ctx, temp64_3, temp64_2, temp64); + /* write back result */ + tcg_gen_extr_i64_i32(tcg_ctx, ret_low, ret_high, temp64_3); + + tcg_temp_free(tcg_ctx, temp); + tcg_temp_free_i64(tcg_ctx, temp64); + tcg_temp_free_i64(tcg_ctx, temp64_2); + tcg_temp_free_i64(tcg_ctx, temp64_3); +} + +static inline void +gen_maddms_h(DisasContext *ctx, TCGv 
ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, + TCGv r3, uint32_t n, uint32_t mode) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_const_i32(tcg_ctx, n); + TCGv_i64 temp64 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 temp64_2 = tcg_temp_new_i64(tcg_ctx); + switch (mode) { + case MODE_LL: + GEN_HELPER_LL(tcg_ctx, mulm_h, temp64, r2, r3, temp); + break; + case MODE_LU: + GEN_HELPER_LU(tcg_ctx, mulm_h, temp64, r2, r3, temp); + break; + case MODE_UL: + GEN_HELPER_UL(tcg_ctx, mulm_h, temp64, r2, r3, temp); + break; + case MODE_UU: + GEN_HELPER_UU(tcg_ctx, mulm_h, temp64, r2, r3, temp); + break; + } + tcg_gen_concat_i32_i64(tcg_ctx, temp64_2, r1_low, r1_high); + gen_helper_add64_ssov(tcg_ctx, temp64, tcg_ctx->cpu_env, temp64_2, temp64); + tcg_gen_extr_i64_i32(tcg_ctx, ret_low, ret_high, temp64); + + tcg_temp_free(tcg_ctx, temp); + tcg_temp_free_i64(tcg_ctx, temp64); + tcg_temp_free_i64(tcg_ctx, temp64_2); +} + +static inline void +gen_maddr64_h(DisasContext *ctx, TCGv ret, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3, uint32_t n, + uint32_t mode) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_const_i32(tcg_ctx, n); + TCGv_i64 temp64 = tcg_temp_new_i64(tcg_ctx); + switch (mode) { + case MODE_LL: + GEN_HELPER_LL(tcg_ctx, mul_h, temp64, r2, r3, temp); + break; + case MODE_LU: + GEN_HELPER_LU(tcg_ctx, mul_h, temp64, r2, r3, temp); + break; + case MODE_UL: + GEN_HELPER_UL(tcg_ctx, mul_h, temp64, r2, r3, temp); + break; + case MODE_UU: + GEN_HELPER_UU(tcg_ctx, mul_h, temp64, r2, r3, temp); + break; + } + gen_helper_addr_h(tcg_ctx, ret, tcg_ctx->cpu_env, temp64, r1_low, r1_high); + + tcg_temp_free(tcg_ctx, temp); + tcg_temp_free_i64(tcg_ctx, temp64); +} + +static inline void +gen_maddr32_h(DisasContext *ctx, TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_temp_new(tcg_ctx); + TCGv temp2 = tcg_temp_new(tcg_ctx); + + tcg_gen_andi_tl(tcg_ctx, temp2, r1, 
0xffff0000); + tcg_gen_shli_tl(tcg_ctx, temp, r1, 16); + gen_maddr64_h(ctx, ret, temp, temp2, r2, r3, n, mode); + + tcg_temp_free(tcg_ctx, temp); + tcg_temp_free(tcg_ctx, temp2); +} + +static inline void +gen_maddsur32_h(DisasContext *ctx, TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_const_i32(tcg_ctx, n); + TCGv temp2 = tcg_temp_new(tcg_ctx); + TCGv_i64 temp64 = tcg_temp_new_i64(tcg_ctx); + switch (mode) { + case MODE_LL: + GEN_HELPER_LL(tcg_ctx, mul_h, temp64, r2, r3, temp); + break; + case MODE_LU: + GEN_HELPER_LU(tcg_ctx, mul_h, temp64, r2, r3, temp); + break; + case MODE_UL: + GEN_HELPER_UL(tcg_ctx, mul_h, temp64, r2, r3, temp); + break; + case MODE_UU: + GEN_HELPER_UU(tcg_ctx, mul_h, temp64, r2, r3, temp); + break; + } + tcg_gen_andi_tl(tcg_ctx, temp2, r1, 0xffff0000); + tcg_gen_shli_tl(tcg_ctx, temp, r1, 16); + gen_helper_addsur_h(tcg_ctx, ret, tcg_ctx->cpu_env, temp64, temp, temp2); + + tcg_temp_free(tcg_ctx, temp); + tcg_temp_free(tcg_ctx, temp2); + tcg_temp_free_i64(tcg_ctx, temp64); +} + + +static inline void +gen_maddr64s_h(DisasContext *ctx, TCGv ret, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3, + uint32_t n, uint32_t mode) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_const_i32(tcg_ctx, n); + TCGv_i64 temp64 = tcg_temp_new_i64(tcg_ctx); + switch (mode) { + case MODE_LL: + GEN_HELPER_LL(tcg_ctx, mul_h, temp64, r2, r3, temp); + break; + case MODE_LU: + GEN_HELPER_LU(tcg_ctx, mul_h, temp64, r2, r3, temp); + break; + case MODE_UL: + GEN_HELPER_UL(tcg_ctx, mul_h, temp64, r2, r3, temp); + break; + case MODE_UU: + GEN_HELPER_UU(tcg_ctx, mul_h, temp64, r2, r3, temp); + break; + } + gen_helper_addr_h_ssov(tcg_ctx, ret, tcg_ctx->cpu_env, temp64, r1_low, r1_high); + + tcg_temp_free(tcg_ctx, temp); + tcg_temp_free_i64(tcg_ctx, temp64); +} + +static inline void +gen_maddr32s_h(DisasContext *ctx, TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) +{ + 
TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_temp_new(tcg_ctx); + TCGv temp2 = tcg_temp_new(tcg_ctx); + + tcg_gen_andi_tl(tcg_ctx, temp2, r1, 0xffff0000); + tcg_gen_shli_tl(tcg_ctx, temp, r1, 16); + gen_maddr64s_h(ctx, ret, temp, temp2, r2, r3, n, mode); + + tcg_temp_free(tcg_ctx, temp); + tcg_temp_free(tcg_ctx, temp2); +} + +static inline void +gen_maddsur32s_h(DisasContext *ctx, TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_const_i32(tcg_ctx, n); + TCGv temp2 = tcg_temp_new(tcg_ctx); + TCGv_i64 temp64 = tcg_temp_new_i64(tcg_ctx); + switch (mode) { + case MODE_LL: + GEN_HELPER_LL(tcg_ctx, mul_h, temp64, r2, r3, temp); + break; + case MODE_LU: + GEN_HELPER_LU(tcg_ctx, mul_h, temp64, r2, r3, temp); + break; + case MODE_UL: + GEN_HELPER_UL(tcg_ctx, mul_h, temp64, r2, r3, temp); + break; + case MODE_UU: + GEN_HELPER_UU(tcg_ctx, mul_h, temp64, r2, r3, temp); + break; + } + tcg_gen_andi_tl(tcg_ctx, temp2, r1, 0xffff0000); + tcg_gen_shli_tl(tcg_ctx, temp, r1, 16); + gen_helper_addsur_h_ssov(tcg_ctx, ret, tcg_ctx->cpu_env, temp64, temp, temp2); + + tcg_temp_free(tcg_ctx, temp); + tcg_temp_free(tcg_ctx, temp2); + tcg_temp_free_i64(tcg_ctx, temp64); +} + +static inline void +gen_maddr_q(DisasContext *ctx, TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_const_i32(tcg_ctx, n); + gen_helper_maddr_q(tcg_ctx, ret, tcg_ctx->cpu_env, r1, r2, r3, temp); + tcg_temp_free(tcg_ctx, temp); +} + +static inline void +gen_maddrs_q(DisasContext *ctx, TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_const_i32(tcg_ctx, n); + gen_helper_maddr_q_ssov(tcg_ctx, ret, tcg_ctx->cpu_env, r1, r2, r3, temp); + tcg_temp_free(tcg_ctx, temp); +} + +static inline void +gen_madd32_q(DisasContext *ctx, TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n, + uint32_t up_shift) +{ + 
TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_temp_new(tcg_ctx); + TCGv temp2 = tcg_temp_new(tcg_ctx); + TCGv temp3 = tcg_temp_new(tcg_ctx); + TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t3 = tcg_temp_new_i64(tcg_ctx); + + tcg_gen_ext_i32_i64(tcg_ctx, t2, arg2); + tcg_gen_ext_i32_i64(tcg_ctx, t3, arg3); + + tcg_gen_mul_i64(tcg_ctx, t2, t2, t3); + tcg_gen_shli_i64(tcg_ctx, t2, t2, n); + + tcg_gen_ext_i32_i64(tcg_ctx, t1, arg1); + tcg_gen_sari_i64(tcg_ctx, t2, t2, up_shift); + + tcg_gen_add_i64(tcg_ctx, t3, t1, t2); + tcg_gen_extrl_i64_i32(tcg_ctx, temp3, t3); + /* calc v bit */ + tcg_gen_setcondi_i64(tcg_ctx, TCG_COND_GT, t1, t3, 0x7fffffffLL); + tcg_gen_setcondi_i64(tcg_ctx, TCG_COND_LT, t2, t3, -0x80000000LL); + tcg_gen_or_i64(tcg_ctx, t1, t1, t2); + tcg_gen_extrl_i64_i32(tcg_ctx, tcg_ctx->cpu_PSW_V, t1); + tcg_gen_shli_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, tcg_ctx->cpu_PSW_V, 31); + /* We produce an overflow on the host if the mul before was + (0x80000000 * 0x80000000) << 1). If this is the + case, we negate the ovf. 
*/ + if (n == 1) { + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, temp, arg2, 0x80000000); + tcg_gen_setcond_tl(tcg_ctx, TCG_COND_EQ, temp2, arg2, arg3); + tcg_gen_and_tl(tcg_ctx, temp, temp, temp2); + tcg_gen_shli_tl(tcg_ctx, temp, temp, 31); + /* negate v bit, if special condition */ + tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, tcg_ctx->cpu_PSW_V, temp); + } + /* Calc SV bit */ + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_V); + /* Calc AV/SAV bits */ + tcg_gen_add_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, temp3, temp3); + tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, temp3, tcg_ctx->cpu_PSW_AV); + /* calc SAV */ + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_AV); + /* write back result */ + tcg_gen_mov_tl(tcg_ctx, ret, temp3); + + tcg_temp_free(tcg_ctx, temp); + tcg_temp_free(tcg_ctx, temp2); + tcg_temp_free(tcg_ctx, temp3); + tcg_temp_free_i64(tcg_ctx, t1); + tcg_temp_free_i64(tcg_ctx, t2); + tcg_temp_free_i64(tcg_ctx, t3); +} + +static inline void +gen_m16add32_q(DisasContext *ctx, TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_temp_new(tcg_ctx); + TCGv temp2 = tcg_temp_new(tcg_ctx); + if (n == 0) { + tcg_gen_mul_tl(tcg_ctx, temp, arg2, arg3); + } else { /* n is expected to be 1 */ + tcg_gen_mul_tl(tcg_ctx, temp, arg2, arg3); + tcg_gen_shli_tl(tcg_ctx, temp, temp, 1); + /* catch special case r1 = r2 = 0x8000 */ + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, temp2, temp, 0x80000000); + tcg_gen_sub_tl(tcg_ctx, temp, temp, temp2); + } + gen_add_d(ctx, ret, arg1, temp); + + tcg_temp_free(tcg_ctx, temp); + tcg_temp_free(tcg_ctx, temp2); +} + +static inline void +gen_m16adds32_q(DisasContext *ctx, TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_temp_new(tcg_ctx); + TCGv temp2 = tcg_temp_new(tcg_ctx); + if (n == 0) { + tcg_gen_mul_tl(tcg_ctx, temp, arg2, arg3); + 
} else { /* n is expected to be 1 */ + tcg_gen_mul_tl(tcg_ctx, temp, arg2, arg3); + tcg_gen_shli_tl(tcg_ctx, temp, temp, 1); + /* catch special case r1 = r2 = 0x8000 */ + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, temp2, temp, 0x80000000); + tcg_gen_sub_tl(tcg_ctx, temp, temp, temp2); + } + gen_adds(ctx, ret, arg1, temp); + + tcg_temp_free(tcg_ctx, temp); + tcg_temp_free(tcg_ctx, temp2); +} + +static inline void +gen_m16add64_q(DisasContext *ctx, TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2, + TCGv arg3, uint32_t n) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_temp_new(tcg_ctx); + TCGv temp2 = tcg_temp_new(tcg_ctx); + TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t3 = tcg_temp_new_i64(tcg_ctx); + + if (n == 0) { + tcg_gen_mul_tl(tcg_ctx, temp, arg2, arg3); + } else { /* n is expected to be 1 */ + tcg_gen_mul_tl(tcg_ctx, temp, arg2, arg3); + tcg_gen_shli_tl(tcg_ctx, temp, temp, 1); + /* catch special case r1 = r2 = 0x8000 */ + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, temp2, temp, 0x80000000); + tcg_gen_sub_tl(tcg_ctx, temp, temp, temp2); + } + tcg_gen_ext_i32_i64(tcg_ctx, t2, temp); + tcg_gen_shli_i64(tcg_ctx, t2, t2, 16); + tcg_gen_concat_i32_i64(tcg_ctx, t1, arg1_low, arg1_high); + gen_add64_d(ctx, t3, t1, t2); + /* write back result */ + tcg_gen_extr_i64_i32(tcg_ctx, rl, rh, t3); + + tcg_temp_free_i64(tcg_ctx, t1); + tcg_temp_free_i64(tcg_ctx, t2); + tcg_temp_free_i64(tcg_ctx, t3); + tcg_temp_free(tcg_ctx, temp); + tcg_temp_free(tcg_ctx, temp2); +} + +static inline void +gen_m16adds64_q(DisasContext *ctx, TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2, + TCGv arg3, uint32_t n) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_temp_new(tcg_ctx); + TCGv temp2 = tcg_temp_new(tcg_ctx); + TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); + + if (n == 0) { + tcg_gen_mul_tl(tcg_ctx, temp, arg2, arg3); + } else { /* n is expected 
to be 1 */ + tcg_gen_mul_tl(tcg_ctx, temp, arg2, arg3); + tcg_gen_shli_tl(tcg_ctx, temp, temp, 1); + /* catch special case r1 = r2 = 0x8000 */ + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, temp2, temp, 0x80000000); + tcg_gen_sub_tl(tcg_ctx, temp, temp, temp2); + } + tcg_gen_ext_i32_i64(tcg_ctx, t2, temp); + tcg_gen_shli_i64(tcg_ctx, t2, t2, 16); + tcg_gen_concat_i32_i64(tcg_ctx, t1, arg1_low, arg1_high); + + gen_helper_add64_ssov(tcg_ctx, t1, tcg_ctx->cpu_env, t1, t2); + tcg_gen_extr_i64_i32(tcg_ctx, rl, rh, t1); + + tcg_temp_free(tcg_ctx, temp); + tcg_temp_free(tcg_ctx, temp2); + tcg_temp_free_i64(tcg_ctx, t1); + tcg_temp_free_i64(tcg_ctx, t2); +} + +static inline void +gen_madd64_q(DisasContext *ctx, TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2, + TCGv arg3, uint32_t n) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t3 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t4 = tcg_temp_new_i64(tcg_ctx); + TCGv temp, temp2; + + tcg_gen_concat_i32_i64(tcg_ctx, t1, arg1_low, arg1_high); + tcg_gen_ext_i32_i64(tcg_ctx, t2, arg2); + tcg_gen_ext_i32_i64(tcg_ctx, t3, arg3); + + tcg_gen_mul_i64(tcg_ctx, t2, t2, t3); + if (n != 0) { + tcg_gen_shli_i64(tcg_ctx, t2, t2, 1); + } + tcg_gen_add_i64(tcg_ctx, t4, t1, t2); + /* calc v bit */ + tcg_gen_xor_i64(tcg_ctx, t3, t4, t1); + tcg_gen_xor_i64(tcg_ctx, t2, t1, t2); + tcg_gen_andc_i64(tcg_ctx, t3, t3, t2); + tcg_gen_extrh_i64_i32(tcg_ctx, tcg_ctx->cpu_PSW_V, t3); + /* We produce an overflow on the host if the mul before was + (0x80000000 * 0x80000000) << 1). If this is the + case, we negate the ovf. 
*/ + if (n == 1) { + temp = tcg_temp_new(tcg_ctx); + temp2 = tcg_temp_new(tcg_ctx); + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, temp, arg2, 0x80000000); + tcg_gen_setcond_tl(tcg_ctx, TCG_COND_EQ, temp2, arg2, arg3); + tcg_gen_and_tl(tcg_ctx, temp, temp, temp2); + tcg_gen_shli_tl(tcg_ctx, temp, temp, 31); + /* negate v bit, if special condition */ + tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, tcg_ctx->cpu_PSW_V, temp); + + tcg_temp_free(tcg_ctx, temp); + tcg_temp_free(tcg_ctx, temp2); + } + /* write back result */ + tcg_gen_extr_i64_i32(tcg_ctx, rl, rh, t4); + /* Calc SV bit */ + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_V); + /* Calc AV/SAV bits */ + tcg_gen_add_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, rh, rh); + tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, rh, tcg_ctx->cpu_PSW_AV); + /* calc SAV */ + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_AV); + + tcg_temp_free_i64(tcg_ctx, t1); + tcg_temp_free_i64(tcg_ctx, t2); + tcg_temp_free_i64(tcg_ctx, t3); + tcg_temp_free_i64(tcg_ctx, t4); +} + +static inline void +gen_madds32_q(DisasContext *ctx, TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n, + uint32_t up_shift) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t3 = tcg_temp_new_i64(tcg_ctx); + + tcg_gen_ext_i32_i64(tcg_ctx, t1, arg1); + tcg_gen_ext_i32_i64(tcg_ctx, t2, arg2); + tcg_gen_ext_i32_i64(tcg_ctx, t3, arg3); + + tcg_gen_mul_i64(tcg_ctx, t2, t2, t3); + tcg_gen_sari_i64(tcg_ctx, t2, t2, up_shift - n); + + gen_helper_madd32_q_add_ssov(tcg_ctx, ret, tcg_ctx->cpu_env, t1, t2); + + tcg_temp_free_i64(tcg_ctx, t1); + tcg_temp_free_i64(tcg_ctx, t2); + tcg_temp_free_i64(tcg_ctx, t3); +} + +static inline void +gen_madds64_q(DisasContext *ctx, TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2, + TCGv arg3, uint32_t n) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv_i64 r1 = 
tcg_temp_new_i64(tcg_ctx); + TCGv temp = tcg_const_i32(tcg_ctx, n); + + tcg_gen_concat_i32_i64(tcg_ctx, r1, arg1_low, arg1_high); + gen_helper_madd64_q_ssov(tcg_ctx, r1, tcg_ctx->cpu_env, r1, arg2, arg3, temp); + tcg_gen_extr_i64_i32(tcg_ctx, rl, rh, r1); + + tcg_temp_free_i64(tcg_ctx, r1); + tcg_temp_free(tcg_ctx, temp); +} +/* ret = r2 - (r1 * r3); */ +static inline void gen_msub32_d(DisasContext *ctx, TCGv ret, TCGv r1, TCGv r2, TCGv r3) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t3 = tcg_temp_new_i64(tcg_ctx); + + tcg_gen_ext_i32_i64(tcg_ctx, t1, r1); + tcg_gen_ext_i32_i64(tcg_ctx, t2, r2); + tcg_gen_ext_i32_i64(tcg_ctx, t3, r3); + + tcg_gen_mul_i64(tcg_ctx, t1, t1, t3); + tcg_gen_sub_i64(tcg_ctx, t1, t2, t1); + + tcg_gen_extrl_i64_i32(tcg_ctx, ret, t1); + /* calc V + t2 > 0x7fffffff */ + tcg_gen_setcondi_i64(tcg_ctx, TCG_COND_GT, t3, t1, 0x7fffffffLL); + /* result < -0x80000000 */ + tcg_gen_setcondi_i64(tcg_ctx, TCG_COND_LT, t2, t1, -0x80000000LL); + tcg_gen_or_i64(tcg_ctx, t2, t2, t3); + tcg_gen_extrl_i64_i32(tcg_ctx, tcg_ctx->cpu_PSW_V, t2); + tcg_gen_shli_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, tcg_ctx->cpu_PSW_V, 31); + + /* Calc SV bit */ + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_V); + /* Calc AV/SAV bits */ + tcg_gen_add_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, ret, ret); + tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, ret, tcg_ctx->cpu_PSW_AV); + /* calc SAV */ + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_AV); + + tcg_temp_free_i64(tcg_ctx, t1); + tcg_temp_free_i64(tcg_ctx, t2); + tcg_temp_free_i64(tcg_ctx, t3); +} + +static inline void gen_msubi32_d(DisasContext *ctx, TCGv ret, TCGv r1, TCGv r2, int32_t con) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_const_i32(tcg_ctx, con); + gen_msub32_d(ctx, ret, r1, r2, temp); + tcg_temp_free(tcg_ctx, temp); +} + +static inline 
void +gen_msub64_d(DisasContext *ctx, TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, + TCGv r3) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv t1 = tcg_temp_new(tcg_ctx); + TCGv t2 = tcg_temp_new(tcg_ctx); + TCGv t3 = tcg_temp_new(tcg_ctx); + TCGv t4 = tcg_temp_new(tcg_ctx); + + tcg_gen_muls2_tl(tcg_ctx, t1, t2, r1, r3); + /* only the sub can overflow */ + tcg_gen_sub2_tl(tcg_ctx, t3, t4, r2_low, r2_high, t1, t2); + /* calc V bit */ + tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, t4, r2_high); + tcg_gen_xor_tl(tcg_ctx, t1, r2_high, t2); + tcg_gen_and_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, tcg_ctx->cpu_PSW_V, t1); + /* Calc SV bit */ + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_V); + /* Calc AV/SAV bits */ + tcg_gen_add_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, t4, t4); + tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, t4, tcg_ctx->cpu_PSW_AV); + /* calc SAV */ + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_AV); + /* write back the result */ + tcg_gen_mov_tl(tcg_ctx, ret_low, t3); + tcg_gen_mov_tl(tcg_ctx, ret_high, t4); + + tcg_temp_free(tcg_ctx, t1); + tcg_temp_free(tcg_ctx, t2); + tcg_temp_free(tcg_ctx, t3); + tcg_temp_free(tcg_ctx, t4); +} + +static inline void +gen_msubi64_d(DisasContext *ctx, TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, + int32_t con) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_const_i32(tcg_ctx, con); + gen_msub64_d(ctx, ret_low, ret_high, r1, r2_low, r2_high, temp); + tcg_temp_free(tcg_ctx, temp); +} + +static inline void +gen_msubu64_d(DisasContext *ctx, TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, + TCGv r3) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t3 = tcg_temp_new_i64(tcg_ctx); + + tcg_gen_extu_i32_i64(tcg_ctx, t1, r1); + tcg_gen_concat_i32_i64(tcg_ctx, t2, r2_low, r2_high); + 
tcg_gen_extu_i32_i64(tcg_ctx, t3, r3); + + tcg_gen_mul_i64(tcg_ctx, t1, t1, t3); + tcg_gen_sub_i64(tcg_ctx, t3, t2, t1); + tcg_gen_extr_i64_i32(tcg_ctx, ret_low, ret_high, t3); + /* calc V bit, only the sub can overflow, if t1 > t2 */ + tcg_gen_setcond_i64(tcg_ctx, TCG_COND_GTU, t1, t1, t2); + tcg_gen_extrl_i64_i32(tcg_ctx, tcg_ctx->cpu_PSW_V, t1); + tcg_gen_shli_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, tcg_ctx->cpu_PSW_V, 31); + /* Calc SV bit */ + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_V); + /* Calc AV/SAV bits */ + tcg_gen_add_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, ret_high, ret_high); + tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, ret_high, tcg_ctx->cpu_PSW_AV); + /* calc SAV */ + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_AV); + + tcg_temp_free_i64(tcg_ctx, t1); + tcg_temp_free_i64(tcg_ctx, t2); + tcg_temp_free_i64(tcg_ctx, t3); +} + +static inline void +gen_msubui64_d(DisasContext *ctx, TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, + int32_t con) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_const_i32(tcg_ctx, con); + gen_msubu64_d(ctx, ret_low, ret_high, r1, r2_low, r2_high, temp); + tcg_temp_free(tcg_ctx, temp); +} + +static inline void gen_addi_d(DisasContext *ctx, TCGv ret, TCGv r1, target_ulong r2) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_const_i32(tcg_ctx, r2); + gen_add_d(ctx, ret, r1, temp); + tcg_temp_free(tcg_ctx, temp); +} +/* calculate the carry bit too */ +static inline void gen_add_CC(DisasContext *ctx, TCGv ret, TCGv r1, TCGv r2) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv t0 = tcg_temp_new_i32(tcg_ctx); + TCGv result = tcg_temp_new_i32(tcg_ctx); + + tcg_gen_movi_tl(tcg_ctx, t0, 0); + /* Addition and set C/V/SV bits */ + tcg_gen_add2_i32(tcg_ctx, result, tcg_ctx->cpu_PSW_C, r1, t0, r2, t0); + /* calc V bit */ + tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, result, r1); + tcg_gen_xor_tl(tcg_ctx, t0, r1, r2); 
+ tcg_gen_andc_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, tcg_ctx->cpu_PSW_V, t0); + /* Calc SV bit */ + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_V); + /* Calc AV/SAV bits */ + tcg_gen_add_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, result, result); + tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, result, tcg_ctx->cpu_PSW_AV); + /* calc SAV */ + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_AV); + /* write back result */ + tcg_gen_mov_tl(tcg_ctx, ret, result); + + tcg_temp_free(tcg_ctx, result); + tcg_temp_free(tcg_ctx, t0); +} + +static inline void gen_addi_CC(DisasContext *ctx, TCGv ret, TCGv r1, int32_t con) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_const_i32(tcg_ctx, con); + gen_add_CC(ctx, ret, r1, temp); + tcg_temp_free(tcg_ctx, temp); +} + +static inline void gen_addc_CC(DisasContext *ctx, TCGv ret, TCGv r1, TCGv r2) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv carry = tcg_temp_new_i32(tcg_ctx); + TCGv t0 = tcg_temp_new_i32(tcg_ctx); + TCGv result = tcg_temp_new_i32(tcg_ctx); + + tcg_gen_movi_tl(tcg_ctx, t0, 0); + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_NE, carry, tcg_ctx->cpu_PSW_C, 0); + /* Addition, carry and set C/V/SV bits */ + tcg_gen_add2_i32(tcg_ctx, result, tcg_ctx->cpu_PSW_C, r1, t0, carry, t0); + tcg_gen_add2_i32(tcg_ctx, result, tcg_ctx->cpu_PSW_C, result, tcg_ctx->cpu_PSW_C, r2, t0); + /* calc V bit */ + tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, result, r1); + tcg_gen_xor_tl(tcg_ctx, t0, r1, r2); + tcg_gen_andc_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, tcg_ctx->cpu_PSW_V, t0); + /* Calc SV bit */ + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_V); + /* Calc AV/SAV bits */ + tcg_gen_add_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, result, result); + tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, result, tcg_ctx->cpu_PSW_AV); + /* calc SAV */ + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_AV); + /* write back 
result */ + tcg_gen_mov_tl(tcg_ctx, ret, result); + + tcg_temp_free(tcg_ctx, result); + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, carry); +} + +static inline void gen_addci_CC(DisasContext *ctx, TCGv ret, TCGv r1, int32_t con) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_const_i32(tcg_ctx, con); + gen_addc_CC(ctx, ret, r1, temp); + tcg_temp_free(tcg_ctx, temp); +} + +static inline void gen_cond_add(DisasContext *ctx, TCGCond cond, TCGv r1, TCGv r2, TCGv r3, + TCGv r4) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_temp_new(tcg_ctx); + TCGv temp2 = tcg_temp_new(tcg_ctx); + TCGv result = tcg_temp_new(tcg_ctx); + TCGv mask = tcg_temp_new(tcg_ctx); + TCGv t0 = tcg_const_i32(tcg_ctx, 0); + + /* create mask for sticky bits */ + tcg_gen_setcond_tl(tcg_ctx, cond, mask, r4, t0); + tcg_gen_shli_tl(tcg_ctx, mask, mask, 31); + + tcg_gen_add_tl(tcg_ctx, result, r1, r2); + /* Calc PSW_V */ + tcg_gen_xor_tl(tcg_ctx, temp, result, r1); + tcg_gen_xor_tl(tcg_ctx, temp2, r1, r2); + tcg_gen_andc_tl(tcg_ctx, temp, temp, temp2); + tcg_gen_movcond_tl(tcg_ctx, cond, tcg_ctx->cpu_PSW_V, r4, t0, temp, tcg_ctx->cpu_PSW_V); + /* Set PSW_SV */ + tcg_gen_and_tl(tcg_ctx, temp, temp, mask); + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SV, temp, tcg_ctx->cpu_PSW_SV); + /* calc AV bit */ + tcg_gen_add_tl(tcg_ctx, temp, result, result); + tcg_gen_xor_tl(tcg_ctx, temp, temp, result); + tcg_gen_movcond_tl(tcg_ctx, cond, tcg_ctx->cpu_PSW_AV, r4, t0, temp, tcg_ctx->cpu_PSW_AV); + /* calc SAV bit */ + tcg_gen_and_tl(tcg_ctx, temp, temp, mask); + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SAV, temp, tcg_ctx->cpu_PSW_SAV); + /* write back result */ + tcg_gen_movcond_tl(tcg_ctx, cond, r3, r4, t0, result, r1); + + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, temp); + tcg_temp_free(tcg_ctx, temp2); + tcg_temp_free(tcg_ctx, result); + tcg_temp_free(tcg_ctx, mask); +} + +static inline void gen_condi_add(DisasContext *ctx, TCGCond cond, TCGv r1, int32_t r2, + 
TCGv r3, TCGv r4) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_const_i32(tcg_ctx, r2); + gen_cond_add(ctx, cond, r1, temp, r3, r4); + tcg_temp_free(tcg_ctx, temp); +} + +static inline void gen_sub_d(DisasContext *ctx, TCGv ret, TCGv r1, TCGv r2) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_temp_new_i32(tcg_ctx); + TCGv result = tcg_temp_new_i32(tcg_ctx); + + tcg_gen_sub_tl(tcg_ctx, result, r1, r2); + /* calc V bit */ + tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, result, r1); + tcg_gen_xor_tl(tcg_ctx, temp, r1, r2); + tcg_gen_and_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, tcg_ctx->cpu_PSW_V, temp); + /* calc SV bit */ + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_V); + /* Calc AV bit */ + tcg_gen_add_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, result, result); + tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, result, tcg_ctx->cpu_PSW_AV); + /* calc SAV bit */ + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_AV); + /* write back result */ + tcg_gen_mov_tl(tcg_ctx, ret, result); + + tcg_temp_free(tcg_ctx, temp); + tcg_temp_free(tcg_ctx, result); +} + +static inline void +gen_sub64_d(DisasContext *ctx, TCGv_i64 ret, TCGv_i64 r1, TCGv_i64 r2) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_temp_new(tcg_ctx); + TCGv_i64 t0 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 result = tcg_temp_new_i64(tcg_ctx); + + tcg_gen_sub_i64(tcg_ctx, result, r1, r2); + /* calc v bit */ + tcg_gen_xor_i64(tcg_ctx, t1, result, r1); + tcg_gen_xor_i64(tcg_ctx, t0, r1, r2); + tcg_gen_and_i64(tcg_ctx, t1, t1, t0); + tcg_gen_extrh_i64_i32(tcg_ctx, tcg_ctx->cpu_PSW_V, t1); + /* calc SV bit */ + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_V); + /* calc AV/SAV bits */ + tcg_gen_extrh_i64_i32(tcg_ctx, temp, result); + tcg_gen_add_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, temp, temp); + tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, 
temp, tcg_ctx->cpu_PSW_AV); + /* calc SAV */ + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_AV); + /* write back result */ + tcg_gen_mov_i64(tcg_ctx, ret, result); + + tcg_temp_free(tcg_ctx, temp); + tcg_temp_free_i64(tcg_ctx, result); + tcg_temp_free_i64(tcg_ctx, t0); + tcg_temp_free_i64(tcg_ctx, t1); +} + +static inline void gen_sub_CC(DisasContext *ctx, TCGv ret, TCGv r1, TCGv r2) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv result = tcg_temp_new(tcg_ctx); + TCGv temp = tcg_temp_new(tcg_ctx); + + tcg_gen_sub_tl(tcg_ctx, result, r1, r2); + /* calc C bit */ + tcg_gen_setcond_tl(tcg_ctx, TCG_COND_GEU, tcg_ctx->cpu_PSW_C, r1, r2); + /* calc V bit */ + tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, result, r1); + tcg_gen_xor_tl(tcg_ctx, temp, r1, r2); + tcg_gen_and_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, tcg_ctx->cpu_PSW_V, temp); + /* calc SV bit */ + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_V); + /* Calc AV bit */ + tcg_gen_add_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, result, result); + tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, result, tcg_ctx->cpu_PSW_AV); + /* calc SAV bit */ + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_AV); + /* write back result */ + tcg_gen_mov_tl(tcg_ctx, ret, result); + + tcg_temp_free(tcg_ctx, result); + tcg_temp_free(tcg_ctx, temp); +} + +static inline void gen_subc_CC(DisasContext *ctx, TCGv ret, TCGv r1, TCGv r2) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_temp_new(tcg_ctx); + tcg_gen_not_tl(tcg_ctx, temp, r2); + gen_addc_CC(ctx, ret, r1, temp); + tcg_temp_free(tcg_ctx, temp); +} + +static inline void gen_cond_sub(DisasContext *ctx, TCGCond cond, TCGv r1, TCGv r2, TCGv r3, + TCGv r4) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_temp_new(tcg_ctx); + TCGv temp2 = tcg_temp_new(tcg_ctx); + TCGv result = tcg_temp_new(tcg_ctx); + TCGv mask = tcg_temp_new(tcg_ctx); + TCGv t0 = 
tcg_const_i32(tcg_ctx, 0); + + /* create mask for sticky bits */ + tcg_gen_setcond_tl(tcg_ctx, cond, mask, r4, t0); + tcg_gen_shli_tl(tcg_ctx, mask, mask, 31); + + tcg_gen_sub_tl(tcg_ctx, result, r1, r2); + /* Calc PSW_V */ + tcg_gen_xor_tl(tcg_ctx, temp, result, r1); + tcg_gen_xor_tl(tcg_ctx, temp2, r1, r2); + tcg_gen_and_tl(tcg_ctx, temp, temp, temp2); + tcg_gen_movcond_tl(tcg_ctx, cond, tcg_ctx->cpu_PSW_V, r4, t0, temp, tcg_ctx->cpu_PSW_V); + /* Set PSW_SV */ + tcg_gen_and_tl(tcg_ctx, temp, temp, mask); + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SV, temp, tcg_ctx->cpu_PSW_SV); + /* calc AV bit */ + tcg_gen_add_tl(tcg_ctx, temp, result, result); + tcg_gen_xor_tl(tcg_ctx, temp, temp, result); + tcg_gen_movcond_tl(tcg_ctx, cond, tcg_ctx->cpu_PSW_AV, r4, t0, temp, tcg_ctx->cpu_PSW_AV); + /* calc SAV bit */ + tcg_gen_and_tl(tcg_ctx, temp, temp, mask); + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SAV, temp, tcg_ctx->cpu_PSW_SAV); + /* write back result */ + tcg_gen_movcond_tl(tcg_ctx, cond, r3, r4, t0, result, r1); + + tcg_temp_free(tcg_ctx, t0); + tcg_temp_free(tcg_ctx, temp); + tcg_temp_free(tcg_ctx, temp2); + tcg_temp_free(tcg_ctx, result); + tcg_temp_free(tcg_ctx, mask); +} + +static inline void +gen_msub_h(DisasContext *ctx, TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, + TCGv r3, uint32_t n, uint32_t mode) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_const_i32(tcg_ctx, n); + TCGv temp2 = tcg_temp_new(tcg_ctx); + TCGv_i64 temp64 = tcg_temp_new_i64(tcg_ctx); + switch (mode) { + case MODE_LL: + GEN_HELPER_LL(tcg_ctx, mul_h, temp64, r2, r3, temp); + break; + case MODE_LU: + GEN_HELPER_LU(tcg_ctx, mul_h, temp64, r2, r3, temp); + break; + case MODE_UL: + GEN_HELPER_UL(tcg_ctx, mul_h, temp64, r2, r3, temp); + break; + case MODE_UU: + GEN_HELPER_UU(tcg_ctx, mul_h, temp64, r2, r3, temp); + break; + } + tcg_gen_extr_i64_i32(tcg_ctx, temp, temp2, temp64); + gen_addsub64_h(ctx, ret_low, ret_high, r1_low, r1_high, temp, temp2, + 
tcg_gen_sub_tl, tcg_gen_sub_tl); + tcg_temp_free(tcg_ctx, temp); + tcg_temp_free(tcg_ctx, temp2); + tcg_temp_free_i64(tcg_ctx, temp64); +} + +static inline void +gen_msubs_h(DisasContext *ctx, TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, + TCGv r3, uint32_t n, uint32_t mode) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_const_i32(tcg_ctx, n); + TCGv temp2 = tcg_temp_new(tcg_ctx); + TCGv temp3 = tcg_temp_new(tcg_ctx); + TCGv_i64 temp64 = tcg_temp_new_i64(tcg_ctx); + + switch (mode) { + case MODE_LL: + GEN_HELPER_LL(tcg_ctx, mul_h, temp64, r2, r3, temp); + break; + case MODE_LU: + GEN_HELPER_LU(tcg_ctx, mul_h, temp64, r2, r3, temp); + break; + case MODE_UL: + GEN_HELPER_UL(tcg_ctx, mul_h, temp64, r2, r3, temp); + break; + case MODE_UU: + GEN_HELPER_UU(tcg_ctx, mul_h, temp64, r2, r3, temp); + break; + } + tcg_gen_extr_i64_i32(tcg_ctx, temp, temp2, temp64); + gen_subs(ctx, ret_low, r1_low, temp); + tcg_gen_mov_tl(tcg_ctx, temp, tcg_ctx->cpu_PSW_V); + tcg_gen_mov_tl(tcg_ctx, temp3, tcg_ctx->cpu_PSW_AV); + gen_subs(ctx, ret_high, r1_high, temp2); + /* combine v bits */ + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, tcg_ctx->cpu_PSW_V, temp); + /* combine av bits */ + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, tcg_ctx->cpu_PSW_AV, temp3); + + tcg_temp_free(tcg_ctx, temp); + tcg_temp_free(tcg_ctx, temp2); + tcg_temp_free(tcg_ctx, temp3); + tcg_temp_free_i64(tcg_ctx, temp64); +} + +static inline void +gen_msubm_h(DisasContext *ctx, TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, + TCGv r3, uint32_t n, uint32_t mode) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_const_i32(tcg_ctx, n); + TCGv_i64 temp64 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 temp64_2 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 temp64_3 = tcg_temp_new_i64(tcg_ctx); + switch (mode) { + case MODE_LL: + GEN_HELPER_LL(tcg_ctx, mulm_h, temp64, r2, r3, temp); + break; + case MODE_LU: + GEN_HELPER_LU(tcg_ctx, mulm_h, temp64, r2, r3, temp); + 
break; + case MODE_UL: + GEN_HELPER_UL(tcg_ctx, mulm_h, temp64, r2, r3, temp); + break; + case MODE_UU: + GEN_HELPER_UU(tcg_ctx, mulm_h, temp64, r2, r3, temp); + break; + } + tcg_gen_concat_i32_i64(tcg_ctx, temp64_2, r1_low, r1_high); + gen_sub64_d(ctx, temp64_3, temp64_2, temp64); + /* write back result */ + tcg_gen_extr_i64_i32(tcg_ctx, ret_low, ret_high, temp64_3); + + tcg_temp_free(tcg_ctx, temp); + tcg_temp_free_i64(tcg_ctx, temp64); + tcg_temp_free_i64(tcg_ctx, temp64_2); + tcg_temp_free_i64(tcg_ctx, temp64_3); +} + +static inline void +gen_msubms_h(DisasContext *ctx, TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, + TCGv r3, uint32_t n, uint32_t mode) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_const_i32(tcg_ctx, n); + TCGv_i64 temp64 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 temp64_2 = tcg_temp_new_i64(tcg_ctx); + switch (mode) { + case MODE_LL: + GEN_HELPER_LL(tcg_ctx, mulm_h, temp64, r2, r3, temp); + break; + case MODE_LU: + GEN_HELPER_LU(tcg_ctx, mulm_h, temp64, r2, r3, temp); + break; + case MODE_UL: + GEN_HELPER_UL(tcg_ctx, mulm_h, temp64, r2, r3, temp); + break; + case MODE_UU: + GEN_HELPER_UU(tcg_ctx, mulm_h, temp64, r2, r3, temp); + break; + } + tcg_gen_concat_i32_i64(tcg_ctx, temp64_2, r1_low, r1_high); + gen_helper_sub64_ssov(tcg_ctx, temp64, tcg_ctx->cpu_env, temp64_2, temp64); + tcg_gen_extr_i64_i32(tcg_ctx, ret_low, ret_high, temp64); + + tcg_temp_free(tcg_ctx, temp); + tcg_temp_free_i64(tcg_ctx, temp64); + tcg_temp_free_i64(tcg_ctx, temp64_2); +} + +static inline void +gen_msubr64_h(DisasContext *ctx, TCGv ret, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3, uint32_t n, + uint32_t mode) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_const_i32(tcg_ctx, n); + TCGv_i64 temp64 = tcg_temp_new_i64(tcg_ctx); + switch (mode) { + case MODE_LL: + GEN_HELPER_LL(tcg_ctx, mul_h, temp64, r2, r3, temp); + break; + case MODE_LU: + GEN_HELPER_LU(tcg_ctx, mul_h, temp64, r2, r3, temp); + break; + case MODE_UL: + 
GEN_HELPER_UL(tcg_ctx, mul_h, temp64, r2, r3, temp); + break; + case MODE_UU: + GEN_HELPER_UU(tcg_ctx, mul_h, temp64, r2, r3, temp); + break; + } + gen_helper_subr_h(tcg_ctx, ret, tcg_ctx->cpu_env, temp64, r1_low, r1_high); + + tcg_temp_free(tcg_ctx, temp); + tcg_temp_free_i64(tcg_ctx, temp64); +} + +static inline void +gen_msubr32_h(DisasContext *ctx, TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_temp_new(tcg_ctx); + TCGv temp2 = tcg_temp_new(tcg_ctx); + + tcg_gen_andi_tl(tcg_ctx, temp2, r1, 0xffff0000); + tcg_gen_shli_tl(tcg_ctx, temp, r1, 16); + gen_msubr64_h(ctx, ret, temp, temp2, r2, r3, n, mode); + + tcg_temp_free(tcg_ctx, temp); + tcg_temp_free(tcg_ctx, temp2); +} + +static inline void +gen_msubr64s_h(DisasContext *ctx, TCGv ret, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3, + uint32_t n, uint32_t mode) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_const_i32(tcg_ctx, n); + TCGv_i64 temp64 = tcg_temp_new_i64(tcg_ctx); + switch (mode) { + case MODE_LL: + GEN_HELPER_LL(tcg_ctx, mul_h, temp64, r2, r3, temp); + break; + case MODE_LU: + GEN_HELPER_LU(tcg_ctx, mul_h, temp64, r2, r3, temp); + break; + case MODE_UL: + GEN_HELPER_UL(tcg_ctx, mul_h, temp64, r2, r3, temp); + break; + case MODE_UU: + GEN_HELPER_UU(tcg_ctx, mul_h, temp64, r2, r3, temp); + break; + } + gen_helper_subr_h_ssov(tcg_ctx, ret, tcg_ctx->cpu_env, temp64, r1_low, r1_high); + + tcg_temp_free(tcg_ctx, temp); + tcg_temp_free_i64(tcg_ctx, temp64); +} + +static inline void +gen_msubr32s_h(DisasContext *ctx, TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_temp_new(tcg_ctx); + TCGv temp2 = tcg_temp_new(tcg_ctx); + + tcg_gen_andi_tl(tcg_ctx, temp2, r1, 0xffff0000); + tcg_gen_shli_tl(tcg_ctx, temp, r1, 16); + gen_msubr64s_h(ctx, ret, temp, temp2, r2, r3, n, mode); + + tcg_temp_free(tcg_ctx, temp); + tcg_temp_free(tcg_ctx, 
temp2); +} + +static inline void +gen_msubr_q(DisasContext *ctx, TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_const_i32(tcg_ctx, n); + gen_helper_msubr_q(tcg_ctx, ret, tcg_ctx->cpu_env, r1, r2, r3, temp); + tcg_temp_free(tcg_ctx, temp); +} + +static inline void +gen_msubrs_q(DisasContext *ctx, TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_const_i32(tcg_ctx, n); + gen_helper_msubr_q_ssov(tcg_ctx, ret, tcg_ctx->cpu_env, r1, r2, r3, temp); + tcg_temp_free(tcg_ctx, temp); +} + +static inline void +gen_msub32_q(DisasContext *ctx, TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n, + uint32_t up_shift) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_temp_new(tcg_ctx); + TCGv temp2 = tcg_temp_new(tcg_ctx); + TCGv temp3 = tcg_temp_new(tcg_ctx); + TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t3 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t4 = tcg_temp_new_i64(tcg_ctx); + + tcg_gen_ext_i32_i64(tcg_ctx, t2, arg2); + tcg_gen_ext_i32_i64(tcg_ctx, t3, arg3); + + tcg_gen_mul_i64(tcg_ctx, t2, t2, t3); + + tcg_gen_ext_i32_i64(tcg_ctx, t1, arg1); + /* if we shift part of the fraction out, we need to round up */ + tcg_gen_andi_i64(tcg_ctx, t4, t2, (1ll << (up_shift - n)) - 1); + tcg_gen_setcondi_i64(tcg_ctx, TCG_COND_NE, t4, t4, 0); + tcg_gen_sari_i64(tcg_ctx, t2, t2, up_shift - n); + tcg_gen_add_i64(tcg_ctx, t2, t2, t4); + + tcg_gen_sub_i64(tcg_ctx, t3, t1, t2); + tcg_gen_extrl_i64_i32(tcg_ctx, temp3, t3); + /* calc v bit */ + tcg_gen_setcondi_i64(tcg_ctx, TCG_COND_GT, t1, t3, 0x7fffffffLL); + tcg_gen_setcondi_i64(tcg_ctx, TCG_COND_LT, t2, t3, -0x80000000LL); + tcg_gen_or_i64(tcg_ctx, t1, t1, t2); + tcg_gen_extrl_i64_i32(tcg_ctx, tcg_ctx->cpu_PSW_V, t1); + tcg_gen_shli_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, tcg_ctx->cpu_PSW_V, 31); + /* Calc SV bit */ + tcg_gen_or_tl(tcg_ctx, 
tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_V); + /* Calc AV/SAV bits */ + tcg_gen_add_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, temp3, temp3); + tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, temp3, tcg_ctx->cpu_PSW_AV); + /* calc SAV */ + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_AV); + /* write back result */ + tcg_gen_mov_tl(tcg_ctx, ret, temp3); + + tcg_temp_free(tcg_ctx, temp); + tcg_temp_free(tcg_ctx, temp2); + tcg_temp_free(tcg_ctx, temp3); + tcg_temp_free_i64(tcg_ctx, t1); + tcg_temp_free_i64(tcg_ctx, t2); + tcg_temp_free_i64(tcg_ctx, t3); + tcg_temp_free_i64(tcg_ctx, t4); +} + +static inline void +gen_m16sub32_q(DisasContext *ctx, TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_temp_new(tcg_ctx); + TCGv temp2 = tcg_temp_new(tcg_ctx); + if (n == 0) { + tcg_gen_mul_tl(tcg_ctx, temp, arg2, arg3); + } else { /* n is expected to be 1 */ + tcg_gen_mul_tl(tcg_ctx, temp, arg2, arg3); + tcg_gen_shli_tl(tcg_ctx, temp, temp, 1); + /* catch special case r1 = r2 = 0x8000 */ + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, temp2, temp, 0x80000000); + tcg_gen_sub_tl(tcg_ctx, temp, temp, temp2); + } + gen_sub_d(ctx, ret, arg1, temp); + + tcg_temp_free(tcg_ctx, temp); + tcg_temp_free(tcg_ctx, temp2); +} + +static inline void +gen_m16subs32_q(DisasContext *ctx, TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_temp_new(tcg_ctx); + TCGv temp2 = tcg_temp_new(tcg_ctx); + if (n == 0) { + tcg_gen_mul_tl(tcg_ctx, temp, arg2, arg3); + } else { /* n is expected to be 1 */ + tcg_gen_mul_tl(tcg_ctx, temp, arg2, arg3); + tcg_gen_shli_tl(tcg_ctx, temp, temp, 1); + /* catch special case r1 = r2 = 0x8000 */ + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, temp2, temp, 0x80000000); + tcg_gen_sub_tl(tcg_ctx, temp, temp, temp2); + } + gen_subs(ctx, ret, arg1, temp); + + tcg_temp_free(tcg_ctx, temp); + 
tcg_temp_free(tcg_ctx, temp2); +} + +static inline void +gen_m16sub64_q(DisasContext *ctx, TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2, + TCGv arg3, uint32_t n) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_temp_new(tcg_ctx); + TCGv temp2 = tcg_temp_new(tcg_ctx); + TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t3 = tcg_temp_new_i64(tcg_ctx); + + if (n == 0) { + tcg_gen_mul_tl(tcg_ctx, temp, arg2, arg3); + } else { /* n is expected to be 1 */ + tcg_gen_mul_tl(tcg_ctx, temp, arg2, arg3); + tcg_gen_shli_tl(tcg_ctx, temp, temp, 1); + /* catch special case r1 = r2 = 0x8000 */ + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, temp2, temp, 0x80000000); + tcg_gen_sub_tl(tcg_ctx, temp, temp, temp2); + } + tcg_gen_ext_i32_i64(tcg_ctx, t2, temp); + tcg_gen_shli_i64(tcg_ctx, t2, t2, 16); + tcg_gen_concat_i32_i64(tcg_ctx, t1, arg1_low, arg1_high); + gen_sub64_d(ctx, t3, t1, t2); + /* write back result */ + tcg_gen_extr_i64_i32(tcg_ctx, rl, rh, t3); + + tcg_temp_free_i64(tcg_ctx, t1); + tcg_temp_free_i64(tcg_ctx, t2); + tcg_temp_free_i64(tcg_ctx, t3); + tcg_temp_free(tcg_ctx, temp); + tcg_temp_free(tcg_ctx, temp2); +} + +static inline void +gen_m16subs64_q(DisasContext *ctx, TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2, + TCGv arg3, uint32_t n) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_temp_new(tcg_ctx); + TCGv temp2 = tcg_temp_new(tcg_ctx); + TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); + + if (n == 0) { + tcg_gen_mul_tl(tcg_ctx, temp, arg2, arg3); + } else { /* n is expected to be 1 */ + tcg_gen_mul_tl(tcg_ctx, temp, arg2, arg3); + tcg_gen_shli_tl(tcg_ctx, temp, temp, 1); + /* catch special case r1 = r2 = 0x8000 */ + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, temp2, temp, 0x80000000); + tcg_gen_sub_tl(tcg_ctx, temp, temp, temp2); + } + tcg_gen_ext_i32_i64(tcg_ctx, t2, temp); + tcg_gen_shli_i64(tcg_ctx, t2, t2, 16); + 
tcg_gen_concat_i32_i64(tcg_ctx, t1, arg1_low, arg1_high); + + gen_helper_sub64_ssov(tcg_ctx, t1, tcg_ctx->cpu_env, t1, t2); + tcg_gen_extr_i64_i32(tcg_ctx, rl, rh, t1); + + tcg_temp_free(tcg_ctx, temp); + tcg_temp_free(tcg_ctx, temp2); + tcg_temp_free_i64(tcg_ctx, t1); + tcg_temp_free_i64(tcg_ctx, t2); +} + +static inline void +gen_msub64_q(DisasContext *ctx, TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2, + TCGv arg3, uint32_t n) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t3 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t4 = tcg_temp_new_i64(tcg_ctx); + TCGv temp, temp2; + + tcg_gen_concat_i32_i64(tcg_ctx, t1, arg1_low, arg1_high); + tcg_gen_ext_i32_i64(tcg_ctx, t2, arg2); + tcg_gen_ext_i32_i64(tcg_ctx, t3, arg3); + + tcg_gen_mul_i64(tcg_ctx, t2, t2, t3); + if (n != 0) { + tcg_gen_shli_i64(tcg_ctx, t2, t2, 1); + } + tcg_gen_sub_i64(tcg_ctx, t4, t1, t2); + /* calc v bit */ + tcg_gen_xor_i64(tcg_ctx, t3, t4, t1); + tcg_gen_xor_i64(tcg_ctx, t2, t1, t2); + tcg_gen_and_i64(tcg_ctx, t3, t3, t2); + tcg_gen_extrh_i64_i32(tcg_ctx, tcg_ctx->cpu_PSW_V, t3); + /* We produce an overflow on the host if the mul before was + (0x80000000 * 0x80000000) << 1). If this is the + case, we negate the ovf. 
*/ + if (n == 1) { + temp = tcg_temp_new(tcg_ctx); + temp2 = tcg_temp_new(tcg_ctx); + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, temp, arg2, 0x80000000); + tcg_gen_setcond_tl(tcg_ctx, TCG_COND_EQ, temp2, arg2, arg3); + tcg_gen_and_tl(tcg_ctx, temp, temp, temp2); + tcg_gen_shli_tl(tcg_ctx, temp, temp, 31); + /* negate v bit, if special condition */ + tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, tcg_ctx->cpu_PSW_V, temp); + + tcg_temp_free(tcg_ctx, temp); + tcg_temp_free(tcg_ctx, temp2); + } + /* write back result */ + tcg_gen_extr_i64_i32(tcg_ctx, rl, rh, t4); + /* Calc SV bit */ + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_V); + /* Calc AV/SAV bits */ + tcg_gen_add_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, rh, rh); + tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, rh, tcg_ctx->cpu_PSW_AV); + /* calc SAV */ + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_AV); + + tcg_temp_free_i64(tcg_ctx, t1); + tcg_temp_free_i64(tcg_ctx, t2); + tcg_temp_free_i64(tcg_ctx, t3); + tcg_temp_free_i64(tcg_ctx, t4); +} + +static inline void +gen_msubs32_q(DisasContext *ctx, TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n, + uint32_t up_shift) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv_i64 t1 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t2 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t3 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 t4 = tcg_temp_new_i64(tcg_ctx); + + tcg_gen_ext_i32_i64(tcg_ctx, t1, arg1); + tcg_gen_ext_i32_i64(tcg_ctx, t2, arg2); + tcg_gen_ext_i32_i64(tcg_ctx, t3, arg3); + + tcg_gen_mul_i64(tcg_ctx, t2, t2, t3); + /* if we shift part of the fraction out, we need to round up */ + tcg_gen_andi_i64(tcg_ctx, t4, t2, (1ll << (up_shift - n)) - 1); + tcg_gen_setcondi_i64(tcg_ctx, TCG_COND_NE, t4, t4, 0); + tcg_gen_sari_i64(tcg_ctx, t3, t2, up_shift - n); + tcg_gen_add_i64(tcg_ctx, t3, t3, t4); + + gen_helper_msub32_q_sub_ssov(tcg_ctx, ret, tcg_ctx->cpu_env, t1, t3); + + tcg_temp_free_i64(tcg_ctx, t1); + 
tcg_temp_free_i64(tcg_ctx, t2); + tcg_temp_free_i64(tcg_ctx, t3); + tcg_temp_free_i64(tcg_ctx, t4); +} + +static inline void +gen_msubs64_q(DisasContext *ctx, TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2, + TCGv arg3, uint32_t n) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv_i64 r1 = tcg_temp_new_i64(tcg_ctx); + TCGv temp = tcg_const_i32(tcg_ctx, n); + + tcg_gen_concat_i32_i64(tcg_ctx, r1, arg1_low, arg1_high); + gen_helper_msub64_q_ssov(tcg_ctx, r1, tcg_ctx->cpu_env, r1, arg2, arg3, temp); + tcg_gen_extr_i64_i32(tcg_ctx, rl, rh, r1); + + tcg_temp_free_i64(tcg_ctx, r1); + tcg_temp_free(tcg_ctx, temp); +} + +static inline void +gen_msubad_h(DisasContext *ctx, TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, + TCGv r3, uint32_t n, uint32_t mode) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_const_i32(tcg_ctx, n); + TCGv temp2 = tcg_temp_new(tcg_ctx); + TCGv_i64 temp64 = tcg_temp_new_i64(tcg_ctx); + switch (mode) { + case MODE_LL: + GEN_HELPER_LL(tcg_ctx, mul_h, temp64, r2, r3, temp); + break; + case MODE_LU: + GEN_HELPER_LU(tcg_ctx, mul_h, temp64, r2, r3, temp); + break; + case MODE_UL: + GEN_HELPER_UL(tcg_ctx, mul_h, temp64, r2, r3, temp); + break; + case MODE_UU: + GEN_HELPER_UU(tcg_ctx, mul_h, temp64, r2, r3, temp); + break; + } + tcg_gen_extr_i64_i32(tcg_ctx, temp, temp2, temp64); + gen_addsub64_h(ctx, ret_low, ret_high, r1_low, r1_high, temp, temp2, + tcg_gen_add_tl, tcg_gen_sub_tl); + tcg_temp_free(tcg_ctx, temp); + tcg_temp_free(tcg_ctx, temp2); + tcg_temp_free_i64(tcg_ctx, temp64); +} + +static inline void +gen_msubadm_h(DisasContext *ctx, TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, + TCGv r3, uint32_t n, uint32_t mode) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_const_i32(tcg_ctx, n); + TCGv_i64 temp64 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 temp64_2 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 temp64_3 = tcg_temp_new_i64(tcg_ctx); + switch (mode) { + case MODE_LL: 
+ GEN_HELPER_LL(tcg_ctx, mul_h, temp64, r2, r3, temp); + break; + case MODE_LU: + GEN_HELPER_LU(tcg_ctx, mul_h, temp64, r2, r3, temp); + break; + case MODE_UL: + GEN_HELPER_UL(tcg_ctx, mul_h, temp64, r2, r3, temp); + break; + case MODE_UU: + GEN_HELPER_UU(tcg_ctx, mul_h, temp64, r2, r3, temp); + break; + } + tcg_gen_concat_i32_i64(tcg_ctx, temp64_3, r1_low, r1_high); + tcg_gen_sari_i64(tcg_ctx, temp64_2, temp64, 32); /* high */ + tcg_gen_ext32s_i64(tcg_ctx, temp64, temp64); /* low */ + tcg_gen_sub_i64(tcg_ctx, temp64, temp64_2, temp64); + tcg_gen_shli_i64(tcg_ctx, temp64, temp64, 16); + + gen_sub64_d(ctx, temp64_2, temp64_3, temp64); + /* write back result */ + tcg_gen_extr_i64_i32(tcg_ctx, ret_low, ret_high, temp64_2); + + tcg_temp_free(tcg_ctx, temp); + tcg_temp_free_i64(tcg_ctx, temp64); + tcg_temp_free_i64(tcg_ctx, temp64_2); + tcg_temp_free_i64(tcg_ctx, temp64_3); +} + +static inline void +gen_msubadr32_h(DisasContext *ctx, TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_const_i32(tcg_ctx, n); + TCGv temp2 = tcg_temp_new(tcg_ctx); + TCGv_i64 temp64 = tcg_temp_new_i64(tcg_ctx); + switch (mode) { + case MODE_LL: + GEN_HELPER_LL(tcg_ctx, mul_h, temp64, r2, r3, temp); + break; + case MODE_LU: + GEN_HELPER_LU(tcg_ctx, mul_h, temp64, r2, r3, temp); + break; + case MODE_UL: + GEN_HELPER_UL(tcg_ctx, mul_h, temp64, r2, r3, temp); + break; + case MODE_UU: + GEN_HELPER_UU(tcg_ctx, mul_h, temp64, r2, r3, temp); + break; + } + tcg_gen_andi_tl(tcg_ctx, temp2, r1, 0xffff0000); + tcg_gen_shli_tl(tcg_ctx, temp, r1, 16); + gen_helper_subadr_h(tcg_ctx, ret, tcg_ctx->cpu_env, temp64, temp, temp2); + + tcg_temp_free(tcg_ctx, temp); + tcg_temp_free(tcg_ctx, temp2); + tcg_temp_free_i64(tcg_ctx, temp64); +} + +static inline void +gen_msubads_h(DisasContext *ctx, TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, + TCGv r3, uint32_t n, uint32_t mode) +{ + TCGContext *tcg_ctx = 
ctx->uc->tcg_ctx; + + TCGv temp = tcg_const_i32(tcg_ctx, n); + TCGv temp2 = tcg_temp_new(tcg_ctx); + TCGv temp3 = tcg_temp_new(tcg_ctx); + TCGv_i64 temp64 = tcg_temp_new_i64(tcg_ctx); + + switch (mode) { + case MODE_LL: + GEN_HELPER_LL(tcg_ctx, mul_h, temp64, r2, r3, temp); + break; + case MODE_LU: + GEN_HELPER_LU(tcg_ctx, mul_h, temp64, r2, r3, temp); + break; + case MODE_UL: + GEN_HELPER_UL(tcg_ctx, mul_h, temp64, r2, r3, temp); + break; + case MODE_UU: + GEN_HELPER_UU(tcg_ctx, mul_h, temp64, r2, r3, temp); + break; + } + tcg_gen_extr_i64_i32(tcg_ctx, temp, temp2, temp64); + gen_adds(ctx, ret_low, r1_low, temp); + tcg_gen_mov_tl(tcg_ctx, temp, tcg_ctx->cpu_PSW_V); + tcg_gen_mov_tl(tcg_ctx, temp3, tcg_ctx->cpu_PSW_AV); + gen_subs(ctx, ret_high, r1_high, temp2); + /* combine v bits */ + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, tcg_ctx->cpu_PSW_V, temp); + /* combine av bits */ + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, tcg_ctx->cpu_PSW_AV, temp3); + + tcg_temp_free(tcg_ctx, temp); + tcg_temp_free(tcg_ctx, temp2); + tcg_temp_free(tcg_ctx, temp3); + tcg_temp_free_i64(tcg_ctx, temp64); +} + +static inline void +gen_msubadms_h(DisasContext *ctx, TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, + TCGv r3, uint32_t n, uint32_t mode) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_const_i32(tcg_ctx, n); + TCGv_i64 temp64 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 temp64_2 = tcg_temp_new_i64(tcg_ctx); + + switch (mode) { + case MODE_LL: + GEN_HELPER_LL(tcg_ctx, mul_h, temp64, r2, r3, temp); + break; + case MODE_LU: + GEN_HELPER_LU(tcg_ctx, mul_h, temp64, r2, r3, temp); + break; + case MODE_UL: + GEN_HELPER_UL(tcg_ctx, mul_h, temp64, r2, r3, temp); + break; + case MODE_UU: + GEN_HELPER_UU(tcg_ctx, mul_h, temp64, r2, r3, temp); + break; + } + tcg_gen_sari_i64(tcg_ctx, temp64_2, temp64, 32); /* high */ + tcg_gen_ext32s_i64(tcg_ctx, temp64, temp64); /* low */ + tcg_gen_sub_i64(tcg_ctx, temp64, temp64_2, temp64); + tcg_gen_shli_i64(tcg_ctx, 
temp64, temp64, 16); + tcg_gen_concat_i32_i64(tcg_ctx, temp64_2, r1_low, r1_high); + + gen_helper_sub64_ssov(tcg_ctx, temp64, tcg_ctx->cpu_env, temp64_2, temp64); + tcg_gen_extr_i64_i32(tcg_ctx, ret_low, ret_high, temp64); + + tcg_temp_free(tcg_ctx, temp); + tcg_temp_free_i64(tcg_ctx, temp64); + tcg_temp_free_i64(tcg_ctx, temp64_2); +} + +static inline void +gen_msubadr32s_h(DisasContext *ctx, TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_const_i32(tcg_ctx, n); + TCGv temp2 = tcg_temp_new(tcg_ctx); + TCGv_i64 temp64 = tcg_temp_new_i64(tcg_ctx); + switch (mode) { + case MODE_LL: + GEN_HELPER_LL(tcg_ctx, mul_h, temp64, r2, r3, temp); + break; + case MODE_LU: + GEN_HELPER_LU(tcg_ctx, mul_h, temp64, r2, r3, temp); + break; + case MODE_UL: + GEN_HELPER_UL(tcg_ctx, mul_h, temp64, r2, r3, temp); + break; + case MODE_UU: + GEN_HELPER_UU(tcg_ctx, mul_h, temp64, r2, r3, temp); + break; + } + tcg_gen_andi_tl(tcg_ctx, temp2, r1, 0xffff0000); + tcg_gen_shli_tl(tcg_ctx, temp, r1, 16); + gen_helper_subadr_h_ssov(tcg_ctx, ret, tcg_ctx->cpu_env, temp64, temp, temp2); + + tcg_temp_free(tcg_ctx, temp); + tcg_temp_free(tcg_ctx, temp2); + tcg_temp_free_i64(tcg_ctx, temp64); +} + +static inline void gen_abs(DisasContext *ctx, TCGv ret, TCGv r1) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + tcg_gen_abs_tl(tcg_ctx, ret, r1); + /* overflow can only happen, if r1 = 0x80000000 */ + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, tcg_ctx->cpu_PSW_V, r1, 0x80000000); + tcg_gen_shli_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, tcg_ctx->cpu_PSW_V, 31); + /* calc SV bit */ + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_V); + /* Calc AV bit */ + tcg_gen_add_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, ret, ret); + tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, ret, tcg_ctx->cpu_PSW_AV); + /* calc SAV bit */ + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_AV); +} + +static 
inline void gen_absdif(DisasContext *ctx, TCGv ret, TCGv r1, TCGv r2) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_temp_new_i32(tcg_ctx); + TCGv result = tcg_temp_new_i32(tcg_ctx); + + tcg_gen_sub_tl(tcg_ctx, result, r1, r2); + tcg_gen_sub_tl(tcg_ctx, temp, r2, r1); + tcg_gen_movcond_tl(tcg_ctx, TCG_COND_GT, result, r1, r2, result, temp); + + /* calc V bit */ + tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, result, r1); + tcg_gen_xor_tl(tcg_ctx, temp, result, r2); + tcg_gen_movcond_tl(tcg_ctx, TCG_COND_GT, tcg_ctx->cpu_PSW_V, r1, r2, tcg_ctx->cpu_PSW_V, temp); + tcg_gen_xor_tl(tcg_ctx, temp, r1, r2); + tcg_gen_and_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, tcg_ctx->cpu_PSW_V, temp); + /* calc SV bit */ + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_V); + /* Calc AV bit */ + tcg_gen_add_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, result, result); + tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, result, tcg_ctx->cpu_PSW_AV); + /* calc SAV bit */ + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_AV); + /* write back result */ + tcg_gen_mov_tl(tcg_ctx, ret, result); + + tcg_temp_free(tcg_ctx, temp); + tcg_temp_free(tcg_ctx, result); +} + +static inline void gen_absdifi(DisasContext *ctx, TCGv ret, TCGv r1, int32_t con) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_const_i32(tcg_ctx, con); + gen_absdif(ctx, ret, r1, temp); + tcg_temp_free(tcg_ctx, temp); +} + +static inline void gen_absdifsi(DisasContext *ctx, TCGv ret, TCGv r1, int32_t con) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_const_i32(tcg_ctx, con); + gen_helper_absdif_ssov(tcg_ctx, ret, tcg_ctx->cpu_env, r1, temp); + tcg_temp_free(tcg_ctx, temp); +} + +static inline void gen_mul_i32s(DisasContext *ctx, TCGv ret, TCGv r1, TCGv r2) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv high = tcg_temp_new(tcg_ctx); + TCGv low = tcg_temp_new(tcg_ctx); + + tcg_gen_muls2_tl(tcg_ctx, low, high, r1, r2); + 
tcg_gen_mov_tl(tcg_ctx, ret, low); + /* calc V bit */ + tcg_gen_sari_tl(tcg_ctx, low, low, 31); + tcg_gen_setcond_tl(tcg_ctx, TCG_COND_NE, tcg_ctx->cpu_PSW_V, high, low); + tcg_gen_shli_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, tcg_ctx->cpu_PSW_V, 31); + /* calc SV bit */ + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_V); + /* Calc AV bit */ + tcg_gen_add_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, ret, ret); + tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, ret, tcg_ctx->cpu_PSW_AV); + /* calc SAV bit */ + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_AV); + + tcg_temp_free(tcg_ctx, high); + tcg_temp_free(tcg_ctx, low); +} + +static inline void gen_muli_i32s(DisasContext *ctx, TCGv ret, TCGv r1, int32_t con) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_const_i32(tcg_ctx, con); + gen_mul_i32s(ctx, ret, r1, temp); + tcg_temp_free(tcg_ctx, temp); +} + +static inline void gen_mul_i64s(DisasContext *ctx, TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + tcg_gen_muls2_tl(tcg_ctx, ret_low, ret_high, r1, r2); + /* clear V bit */ + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, 0); + /* calc SV bit */ + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_V); + /* Calc AV bit */ + tcg_gen_add_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, ret_high, ret_high); + tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, ret_high, tcg_ctx->cpu_PSW_AV); + /* calc SAV bit */ + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_AV); +} + +static inline void gen_muli_i64s(DisasContext *ctx, TCGv ret_low, TCGv ret_high, TCGv r1, + int32_t con) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_const_i32(tcg_ctx, con); + gen_mul_i64s(ctx, ret_low, ret_high, r1, temp); + tcg_temp_free(tcg_ctx, temp); +} + +static inline void gen_mul_i64u(DisasContext *ctx, TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2) +{ + TCGContext 
*tcg_ctx = ctx->uc->tcg_ctx; + + tcg_gen_mulu2_tl(tcg_ctx, ret_low, ret_high, r1, r2); + /* clear V bit */ + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, 0); + /* calc SV bit */ + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_V); + /* Calc AV bit */ + tcg_gen_add_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, ret_high, ret_high); + tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, ret_high, tcg_ctx->cpu_PSW_AV); + /* calc SAV bit */ + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_AV); +} + +static inline void gen_muli_i64u(DisasContext *ctx, TCGv ret_low, TCGv ret_high, TCGv r1, + int32_t con) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_const_i32(tcg_ctx, con); + gen_mul_i64u(ctx, ret_low, ret_high, r1, temp); + tcg_temp_free(tcg_ctx, temp); +} + +static inline void gen_mulsi_i32(DisasContext *ctx, TCGv ret, TCGv r1, int32_t con) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_const_i32(tcg_ctx, con); + gen_helper_mul_ssov(tcg_ctx, ret, tcg_ctx->cpu_env, r1, temp); + tcg_temp_free(tcg_ctx, temp); +} + +static inline void gen_mulsui_i32(DisasContext *ctx, TCGv ret, TCGv r1, int32_t con) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_const_i32(tcg_ctx, con); + gen_helper_mul_suov(tcg_ctx, ret, tcg_ctx->cpu_env, r1, temp); + tcg_temp_free(tcg_ctx, temp); +} +/* gen_maddsi_32(tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r3], const9); */ +static inline void gen_maddsi_32(DisasContext *ctx, TCGv ret, TCGv r1, TCGv r2, int32_t con) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_const_i32(tcg_ctx, con); + gen_helper_madd32_ssov(tcg_ctx, ret, tcg_ctx->cpu_env, r1, r2, temp); + tcg_temp_free(tcg_ctx, temp); +} + +static inline void gen_maddsui_32(DisasContext *ctx, TCGv ret, TCGv r1, TCGv r2, int32_t con) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_const_i32(tcg_ctx, con); + gen_helper_madd32_suov(tcg_ctx, 
ret, tcg_ctx->cpu_env, r1, r2, temp); + tcg_temp_free(tcg_ctx, temp); +} + +static void +gen_mul_q(DisasContext *ctx, TCGv rl, TCGv rh, TCGv arg1, TCGv arg2, uint32_t n, uint32_t up_shift) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_temp_new(tcg_ctx); + TCGv_i64 temp_64 = tcg_temp_new_i64(tcg_ctx); + TCGv_i64 temp2_64 = tcg_temp_new_i64(tcg_ctx); + + if (n == 0) { + if (up_shift == 32) { + tcg_gen_muls2_tl(tcg_ctx, rh, rl, arg1, arg2); + } else if (up_shift == 16) { + tcg_gen_ext_i32_i64(tcg_ctx, temp_64, arg1); + tcg_gen_ext_i32_i64(tcg_ctx, temp2_64, arg2); + + tcg_gen_mul_i64(tcg_ctx, temp_64, temp_64, temp2_64); + tcg_gen_shri_i64(tcg_ctx, temp_64, temp_64, up_shift); + tcg_gen_extr_i64_i32(tcg_ctx, rl, rh, temp_64); + } else { + tcg_gen_muls2_tl(tcg_ctx, rl, rh, arg1, arg2); + } + /* reset v bit */ + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, 0); + } else { /* n is expected to be 1 */ + tcg_gen_ext_i32_i64(tcg_ctx, temp_64, arg1); + tcg_gen_ext_i32_i64(tcg_ctx, temp2_64, arg2); + + tcg_gen_mul_i64(tcg_ctx, temp_64, temp_64, temp2_64); + + if (up_shift == 0) { + tcg_gen_shli_i64(tcg_ctx, temp_64, temp_64, 1); + } else { + tcg_gen_shri_i64(tcg_ctx, temp_64, temp_64, up_shift - 1); + } + tcg_gen_extr_i64_i32(tcg_ctx, rl, rh, temp_64); + /* overflow only occurs if r1 = r2 = 0x8000 */ + if (up_shift == 0) {/* result is 64 bit */ + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, tcg_ctx->cpu_PSW_V, rh, + 0x80000000); + } else { /* result is 32 bit */ + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, tcg_ctx->cpu_PSW_V, rl, + 0x80000000); + } + tcg_gen_shli_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, tcg_ctx->cpu_PSW_V, 31); + /* calc sv overflow bit */ + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_V); + } + /* calc av overflow bit */ + if (up_shift == 0) { + tcg_gen_add_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, rh, rh); + tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, rh, tcg_ctx->cpu_PSW_AV); + } else { + tcg_gen_add_tl(tcg_ctx, 
tcg_ctx->cpu_PSW_AV, rl, rl); + tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, rl, tcg_ctx->cpu_PSW_AV); + } + /* calc sav overflow bit */ + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_AV); + tcg_temp_free(tcg_ctx, temp); + tcg_temp_free_i64(tcg_ctx, temp_64); + tcg_temp_free_i64(tcg_ctx, temp2_64); +} + +static void +gen_mul_q_16(DisasContext *ctx, TCGv ret, TCGv arg1, TCGv arg2, uint32_t n) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_temp_new(tcg_ctx); + if (n == 0) { + tcg_gen_mul_tl(tcg_ctx, ret, arg1, arg2); + } else { /* n is expected to be 1 */ + tcg_gen_mul_tl(tcg_ctx, ret, arg1, arg2); + tcg_gen_shli_tl(tcg_ctx, ret, ret, 1); + /* catch special case r1 = r2 = 0x8000 */ + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, temp, ret, 0x80000000); + tcg_gen_sub_tl(tcg_ctx, ret, ret, temp); + } + /* reset v bit */ + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, 0); + /* calc av overflow bit */ + tcg_gen_add_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, ret, ret); + tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, ret, tcg_ctx->cpu_PSW_AV); + /* calc sav overflow bit */ + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_AV); + + tcg_temp_free(tcg_ctx, temp); +} + +static void gen_mulr_q(DisasContext *ctx, TCGv ret, TCGv arg1, TCGv arg2, uint32_t n) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_temp_new(tcg_ctx); + if (n == 0) { + tcg_gen_mul_tl(tcg_ctx, ret, arg1, arg2); + tcg_gen_addi_tl(tcg_ctx, ret, ret, 0x8000); + } else { + tcg_gen_mul_tl(tcg_ctx, ret, arg1, arg2); + tcg_gen_shli_tl(tcg_ctx, ret, ret, 1); + tcg_gen_addi_tl(tcg_ctx, ret, ret, 0x8000); + /* catch special case r1 = r2 = 0x8000 */ + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, temp, ret, 0x80008000); + tcg_gen_muli_tl(tcg_ctx, temp, temp, 0x8001); + tcg_gen_sub_tl(tcg_ctx, ret, ret, temp); + } + /* reset v bit */ + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, 0); + /* calc av overflow bit */ + 
tcg_gen_add_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, ret, ret); + tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, ret, tcg_ctx->cpu_PSW_AV); + /* calc sav overflow bit */ + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_AV); + /* cut halfword off */ + tcg_gen_andi_tl(tcg_ctx, ret, ret, 0xffff0000); + + tcg_temp_free(tcg_ctx, temp); +} + +static inline void +gen_madds_64(DisasContext *ctx, TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, + TCGv r3) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv_i64 temp64 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_concat_i32_i64(tcg_ctx, temp64, r2_low, r2_high); + gen_helper_madd64_ssov(tcg_ctx, temp64, tcg_ctx->cpu_env, r1, temp64, r3); + tcg_gen_extr_i64_i32(tcg_ctx, ret_low, ret_high, temp64); + tcg_temp_free_i64(tcg_ctx, temp64); +} + +static inline void +gen_maddsi_64(DisasContext *ctx, TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, + int32_t con) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_const_i32(tcg_ctx, con); + gen_madds_64(ctx, ret_low, ret_high, r1, r2_low, r2_high, temp); + tcg_temp_free(tcg_ctx, temp); +} + +static inline void +gen_maddsu_64(DisasContext *ctx, TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, + TCGv r3) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv_i64 temp64 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_concat_i32_i64(tcg_ctx, temp64, r2_low, r2_high); + gen_helper_madd64_suov(tcg_ctx, temp64, tcg_ctx->cpu_env, r1, temp64, r3); + tcg_gen_extr_i64_i32(tcg_ctx, ret_low, ret_high, temp64); + tcg_temp_free_i64(tcg_ctx, temp64); +} + +static inline void +gen_maddsui_64(DisasContext *ctx, TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, + int32_t con) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_const_i32(tcg_ctx, con); + gen_maddsu_64(ctx, ret_low, ret_high, r1, r2_low, r2_high, temp); + tcg_temp_free(tcg_ctx, temp); +} + +static inline void gen_msubsi_32(DisasContext 
*ctx, TCGv ret, TCGv r1, TCGv r2, int32_t con) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_const_i32(tcg_ctx, con); + gen_helper_msub32_ssov(tcg_ctx, ret, tcg_ctx->cpu_env, r1, r2, temp); + tcg_temp_free(tcg_ctx, temp); +} + +static inline void gen_msubsui_32(DisasContext *ctx, TCGv ret, TCGv r1, TCGv r2, int32_t con) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_const_i32(tcg_ctx, con); + gen_helper_msub32_suov(tcg_ctx, ret, tcg_ctx->cpu_env, r1, r2, temp); + tcg_temp_free(tcg_ctx, temp); +} + +static inline void +gen_msubs_64(DisasContext *ctx, TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, + TCGv r3) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv_i64 temp64 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_concat_i32_i64(tcg_ctx, temp64, r2_low, r2_high); + gen_helper_msub64_ssov(tcg_ctx, temp64, tcg_ctx->cpu_env, r1, temp64, r3); + tcg_gen_extr_i64_i32(tcg_ctx, ret_low, ret_high, temp64); + tcg_temp_free_i64(tcg_ctx, temp64); +} + +static inline void +gen_msubsi_64(DisasContext *ctx, TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, + int32_t con) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_const_i32(tcg_ctx, con); + gen_msubs_64(ctx, ret_low, ret_high, r1, r2_low, r2_high, temp); + tcg_temp_free(tcg_ctx, temp); +} + +static inline void +gen_msubsu_64(DisasContext *ctx, TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, + TCGv r3) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv_i64 temp64 = tcg_temp_new_i64(tcg_ctx); + tcg_gen_concat_i32_i64(tcg_ctx, temp64, r2_low, r2_high); + gen_helper_msub64_suov(tcg_ctx, temp64, tcg_ctx->cpu_env, r1, temp64, r3); + tcg_gen_extr_i64_i32(tcg_ctx, ret_low, ret_high, temp64); + tcg_temp_free_i64(tcg_ctx, temp64); +} + +static inline void +gen_msubsui_64(DisasContext *ctx, TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, + int32_t con) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = 
tcg_const_i32(tcg_ctx, con); + gen_msubsu_64(ctx, ret_low, ret_high, r1, r2_low, r2_high, temp); + tcg_temp_free(tcg_ctx, temp); +} + +static void gen_saturate(DisasContext *ctx, TCGv ret, TCGv arg, int32_t up, int32_t low) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv sat_neg = tcg_const_i32(tcg_ctx, low); + TCGv temp = tcg_const_i32(tcg_ctx, up); + + /* sat_neg = (arg < low ) ? low : arg; */ + tcg_gen_movcond_tl(tcg_ctx, TCG_COND_LT, sat_neg, arg, sat_neg, sat_neg, arg); + + /* ret = (sat_neg > up ) ? up : sat_neg; */ + tcg_gen_movcond_tl(tcg_ctx, TCG_COND_GT, ret, sat_neg, temp, temp, sat_neg); + + tcg_temp_free(tcg_ctx, sat_neg); + tcg_temp_free(tcg_ctx, temp); +} + +static void gen_saturate_u(DisasContext *ctx, TCGv ret, TCGv arg, int32_t up) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_const_i32(tcg_ctx, up); + /* sat_neg = (arg > up ) ? up : arg; */ + tcg_gen_movcond_tl(tcg_ctx, TCG_COND_GTU, ret, arg, temp, temp, arg); + tcg_temp_free(tcg_ctx, temp); +} + +static void gen_shi(DisasContext *ctx, TCGv ret, TCGv r1, int32_t shift_count) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + if (shift_count == -32) { + tcg_gen_movi_tl(tcg_ctx, ret, 0); + } else if (shift_count >= 0) { + tcg_gen_shli_tl(tcg_ctx, ret, r1, shift_count); + } else { + tcg_gen_shri_tl(tcg_ctx, ret, r1, -shift_count); + } +} + +static void gen_sh_hi(DisasContext *ctx, TCGv ret, TCGv r1, int32_t shiftcount) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp_low, temp_high; + + if (shiftcount == -16) { + tcg_gen_movi_tl(tcg_ctx, ret, 0); + } else { + temp_high = tcg_temp_new(tcg_ctx); + temp_low = tcg_temp_new(tcg_ctx); + + tcg_gen_andi_tl(tcg_ctx, temp_low, r1, 0xffff); + tcg_gen_andi_tl(tcg_ctx, temp_high, r1, 0xffff0000); + gen_shi(ctx, temp_low, temp_low, shiftcount); + gen_shi(ctx, ret, temp_high, shiftcount); + tcg_gen_deposit_tl(tcg_ctx, ret, ret, temp_low, 0, 16); + + tcg_temp_free(tcg_ctx, temp_low); + tcg_temp_free(tcg_ctx, temp_high); + } +} + 
+static void gen_shaci(DisasContext *ctx, TCGv ret, TCGv r1, int32_t shift_count) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + uint32_t msk, msk_start; + TCGv temp = tcg_temp_new(tcg_ctx); + TCGv temp2 = tcg_temp_new(tcg_ctx); + TCGv t_0 = tcg_const_i32(tcg_ctx, 0); + + if (shift_count == 0) { + /* Clear PSW.C and PSW.V */ + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_PSW_C, 0); + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, tcg_ctx->cpu_PSW_C); + tcg_gen_mov_tl(tcg_ctx, ret, r1); + } else if (shift_count == -32) { + /* set PSW.C */ + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_PSW_C, r1); + /* fill ret completely with sign bit */ + tcg_gen_sari_tl(tcg_ctx, ret, r1, 31); + /* clear PSW.V */ + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, 0); + } else if (shift_count > 0) { + TCGv t_max = tcg_const_i32(tcg_ctx, 0x7FFFFFFF >> shift_count); + TCGv t_min = tcg_const_i32(tcg_ctx, ((int32_t) -0x80000000) >> shift_count); + + /* calc carry */ + msk_start = 32 - shift_count; + msk = ((1 << shift_count) - 1) << msk_start; + tcg_gen_andi_tl(tcg_ctx, tcg_ctx->cpu_PSW_C, r1, msk); + /* calc v/sv bits */ + tcg_gen_setcond_tl(tcg_ctx, TCG_COND_GT, temp, r1, t_max); + tcg_gen_setcond_tl(tcg_ctx, TCG_COND_LT, temp2, r1, t_min); + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, temp, temp2); + tcg_gen_shli_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, tcg_ctx->cpu_PSW_V, 31); + /* calc sv */ + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_V, tcg_ctx->cpu_PSW_SV); + /* do shift */ + tcg_gen_shli_tl(tcg_ctx, ret, r1, shift_count); + + tcg_temp_free(tcg_ctx, t_max); + tcg_temp_free(tcg_ctx, t_min); + } else { + /* clear PSW.V */ + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, 0); + /* calc carry */ + msk = (1 << -shift_count) - 1; + tcg_gen_andi_tl(tcg_ctx, tcg_ctx->cpu_PSW_C, r1, msk); + /* do shift */ + tcg_gen_sari_tl(tcg_ctx, ret, r1, -shift_count); + } + /* calc av overflow bit */ + tcg_gen_add_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, ret, ret); + tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, ret, 
tcg_ctx->cpu_PSW_AV); + /* calc sav overflow bit */ + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_AV); + + tcg_temp_free(tcg_ctx, temp); + tcg_temp_free(tcg_ctx, temp2); + tcg_temp_free(tcg_ctx, t_0); +} + +static void gen_shas(DisasContext *ctx, TCGv ret, TCGv r1, TCGv r2) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + gen_helper_sha_ssov(tcg_ctx, ret, tcg_ctx->cpu_env, r1, r2); +} + +static void gen_shasi(DisasContext *ctx, TCGv ret, TCGv r1, int32_t con) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_const_i32(tcg_ctx, con); + gen_shas(ctx, ret, r1, temp); + tcg_temp_free(tcg_ctx, temp); +} + +static void gen_sha_hi(DisasContext *ctx, TCGv ret, TCGv r1, int32_t shift_count) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv low, high; + + if (shift_count == 0) { + tcg_gen_mov_tl(tcg_ctx, ret, r1); + } else if (shift_count > 0) { + low = tcg_temp_new(tcg_ctx); + high = tcg_temp_new(tcg_ctx); + + tcg_gen_andi_tl(tcg_ctx, high, r1, 0xffff0000); + tcg_gen_shli_tl(tcg_ctx, low, r1, shift_count); + tcg_gen_shli_tl(tcg_ctx, ret, high, shift_count); + tcg_gen_deposit_tl(tcg_ctx, ret, ret, low, 0, 16); + + tcg_temp_free(tcg_ctx, low); + tcg_temp_free(tcg_ctx, high); + } else { + low = tcg_temp_new(tcg_ctx); + high = tcg_temp_new(tcg_ctx); + + tcg_gen_ext16s_tl(tcg_ctx, low, r1); + tcg_gen_sari_tl(tcg_ctx, low, low, -shift_count); + tcg_gen_sari_tl(tcg_ctx, ret, r1, -shift_count); + tcg_gen_deposit_tl(tcg_ctx, ret, ret, low, 0, 16); + + tcg_temp_free(tcg_ctx, low); + tcg_temp_free(tcg_ctx, high); + } + +} + +/* ret = {ret[30:0], (r1 cond r2)}; */ +static void gen_sh_cond(DisasContext *ctx, int cond, TCGv ret, TCGv r1, TCGv r2) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_temp_new(tcg_ctx); + TCGv temp2 = tcg_temp_new(tcg_ctx); + + tcg_gen_shli_tl(tcg_ctx, temp, ret, 1); + tcg_gen_setcond_tl(tcg_ctx, cond, temp2, r1, r2); + tcg_gen_or_tl(tcg_ctx, ret, temp, temp2); + + tcg_temp_free(tcg_ctx, 
temp); + tcg_temp_free(tcg_ctx, temp2); +} + +static void gen_sh_condi(DisasContext *ctx, int cond, TCGv ret, TCGv r1, int32_t con) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_const_i32(tcg_ctx, con); + gen_sh_cond(ctx, cond, ret, r1, temp); + tcg_temp_free(tcg_ctx, temp); +} + +static inline void gen_adds(DisasContext *ctx, TCGv ret, TCGv r1, TCGv r2) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + gen_helper_add_ssov(tcg_ctx, ret, tcg_ctx->cpu_env, r1, r2); +} + +static inline void gen_addsi(DisasContext *ctx, TCGv ret, TCGv r1, int32_t con) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_const_i32(tcg_ctx, con); + gen_helper_add_ssov(tcg_ctx, ret, tcg_ctx->cpu_env, r1, temp); + tcg_temp_free(tcg_ctx, temp); +} + +static inline void gen_addsui(DisasContext *ctx, TCGv ret, TCGv r1, int32_t con) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_const_i32(tcg_ctx, con); + gen_helper_add_suov(tcg_ctx, ret, tcg_ctx->cpu_env, r1, temp); + tcg_temp_free(tcg_ctx, temp); +} + +static inline void gen_subs(DisasContext *ctx, TCGv ret, TCGv r1, TCGv r2) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + gen_helper_sub_ssov(tcg_ctx, ret, tcg_ctx->cpu_env, r1, r2); +} + +static inline void gen_subsu(DisasContext *ctx, TCGv ret, TCGv r1, TCGv r2) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + gen_helper_sub_suov(tcg_ctx, ret, tcg_ctx->cpu_env, r1, r2); +} + +static inline void gen_bit_2op(DisasContext *ctx, TCGv ret, TCGv r1, TCGv r2, + int pos1, int pos2, + void(*op1)(TCGContext*, TCGv, TCGv, TCGv), + void(*op2)(TCGContext*, TCGv, TCGv, TCGv)) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp1, temp2; + + temp1 = tcg_temp_new(tcg_ctx); + temp2 = tcg_temp_new(tcg_ctx); + + tcg_gen_shri_tl(tcg_ctx, temp2, r2, pos2); + tcg_gen_shri_tl(tcg_ctx, temp1, r1, pos1); + + (*op1)(tcg_ctx, temp1, temp1, temp2); + (*op2)(tcg_ctx, temp1 , ret, temp1); + + tcg_gen_deposit_tl(tcg_ctx, ret, ret, temp1, 0, 1); + + 
tcg_temp_free(tcg_ctx, temp1); + tcg_temp_free(tcg_ctx, temp2); +} + +/* ret = r1[pos1] op1 r2[pos2]; */ +static inline void gen_bit_1op(DisasContext *ctx, TCGv ret, TCGv r1, TCGv r2, + int pos1, int pos2, + void(*op1)(TCGContext*, TCGv, TCGv, TCGv)) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp1, temp2; + + temp1 = tcg_temp_new(tcg_ctx); + temp2 = tcg_temp_new(tcg_ctx); + + tcg_gen_shri_tl(tcg_ctx, temp2, r2, pos2); + tcg_gen_shri_tl(tcg_ctx, temp1, r1, pos1); + + (*op1)(tcg_ctx, ret, temp1, temp2); + + tcg_gen_andi_tl(tcg_ctx, ret, ret, 0x1); + + tcg_temp_free(tcg_ctx, temp1); + tcg_temp_free(tcg_ctx, temp2); +} + +static inline void gen_accumulating_cond(DisasContext *ctx, int cond, TCGv ret, TCGv r1, TCGv r2, + void(*op)(TCGContext*, TCGv, TCGv, TCGv)) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_temp_new(tcg_ctx); + TCGv temp2 = tcg_temp_new(tcg_ctx); + /* temp = (arg1 cond arg2 )*/ + tcg_gen_setcond_tl(tcg_ctx, cond, temp, r1, r2); + /* temp2 = ret[0]*/ + tcg_gen_andi_tl(tcg_ctx, temp2, ret, 0x1); + /* temp = temp insn temp2 */ + (*op)(tcg_ctx, temp, temp, temp2); + /* ret = {ret[31:1], temp} */ + tcg_gen_deposit_tl(tcg_ctx, ret, ret, temp, 0, 1); + + tcg_temp_free(tcg_ctx, temp); + tcg_temp_free(tcg_ctx, temp2); +} + +static inline void +gen_accumulating_condi(DisasContext *ctx, int cond, TCGv ret, TCGv r1, int32_t con, + void(*op)(TCGContext*, TCGv, TCGv, TCGv)) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_const_i32(tcg_ctx, con); + gen_accumulating_cond(ctx, cond, ret, r1, temp, op); + tcg_temp_free(tcg_ctx, temp); +} + +/* ret = (r1 cond r2) ? 0xFFFFFFFF ? 
0x00000000;*/ +static inline void gen_cond_w(DisasContext *ctx, TCGCond cond, TCGv ret, TCGv r1, TCGv r2) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + tcg_gen_setcond_tl(tcg_ctx, cond, ret, r1, r2); + tcg_gen_neg_tl(tcg_ctx, ret, ret); +} + +static inline void gen_eqany_bi(DisasContext *ctx, TCGv ret, TCGv r1, int32_t con) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv b0 = tcg_temp_new(tcg_ctx); + TCGv b1 = tcg_temp_new(tcg_ctx); + TCGv b2 = tcg_temp_new(tcg_ctx); + TCGv b3 = tcg_temp_new(tcg_ctx); + + /* byte 0 */ + tcg_gen_andi_tl(tcg_ctx, b0, r1, 0xff); + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, b0, b0, con & 0xff); + + /* byte 1 */ + tcg_gen_andi_tl(tcg_ctx, b1, r1, 0xff00); + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, b1, b1, con & 0xff00); + + /* byte 2 */ + tcg_gen_andi_tl(tcg_ctx, b2, r1, 0xff0000); + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, b2, b2, con & 0xff0000); + + /* byte 3 */ + tcg_gen_andi_tl(tcg_ctx, b3, r1, 0xff000000); + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, b3, b3, con & 0xff000000); + + /* combine them */ + tcg_gen_or_tl(tcg_ctx, ret, b0, b1); + tcg_gen_or_tl(tcg_ctx, ret, ret, b2); + tcg_gen_or_tl(tcg_ctx, ret, ret, b3); + + tcg_temp_free(tcg_ctx, b0); + tcg_temp_free(tcg_ctx, b1); + tcg_temp_free(tcg_ctx, b2); + tcg_temp_free(tcg_ctx, b3); +} + +static inline void gen_eqany_hi(DisasContext *ctx, TCGv ret, TCGv r1, int32_t con) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv h0 = tcg_temp_new(tcg_ctx); + TCGv h1 = tcg_temp_new(tcg_ctx); + + /* halfword 0 */ + tcg_gen_andi_tl(tcg_ctx, h0, r1, 0xffff); + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, h0, h0, con & 0xffff); + + /* halfword 1 */ + tcg_gen_andi_tl(tcg_ctx, h1, r1, 0xffff0000); + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, h1, h1, con & 0xffff0000); + + /* combine them */ + tcg_gen_or_tl(tcg_ctx, ret, h0, h1); + + tcg_temp_free(tcg_ctx, h0); + tcg_temp_free(tcg_ctx, h1); +} +/* mask = ((1 << width) -1) << pos; + ret = (r1 & ~mask) | (r2 << pos) & mask); */ +static 
inline void gen_insert(DisasContext *ctx, TCGv ret, TCGv r1, TCGv r2, TCGv width, TCGv pos) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv mask = tcg_temp_new(tcg_ctx); + TCGv temp = tcg_temp_new(tcg_ctx); + TCGv temp2 = tcg_temp_new(tcg_ctx); + + tcg_gen_movi_tl(tcg_ctx, mask, 1); + tcg_gen_shl_tl(tcg_ctx, mask, mask, width); + tcg_gen_subi_tl(tcg_ctx, mask, mask, 1); + tcg_gen_shl_tl(tcg_ctx, mask, mask, pos); + + tcg_gen_shl_tl(tcg_ctx, temp, r2, pos); + tcg_gen_and_tl(tcg_ctx, temp, temp, mask); + tcg_gen_andc_tl(tcg_ctx, temp2, r1, mask); + tcg_gen_or_tl(tcg_ctx, ret, temp, temp2); + + tcg_temp_free(tcg_ctx, mask); + tcg_temp_free(tcg_ctx, temp); + tcg_temp_free(tcg_ctx, temp2); +} + +static inline void gen_bsplit(DisasContext *ctx, TCGv rl, TCGv rh, TCGv r1) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv_i64 temp = tcg_temp_new_i64(tcg_ctx); + + gen_helper_bsplit(tcg_ctx, temp, r1); + tcg_gen_extr_i64_i32(tcg_ctx, rl, rh, temp); + + tcg_temp_free_i64(tcg_ctx, temp); +} + +static inline void gen_unpack(DisasContext *ctx, TCGv rl, TCGv rh, TCGv r1) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv_i64 temp = tcg_temp_new_i64(tcg_ctx); + + gen_helper_unpack(tcg_ctx, temp, r1); + tcg_gen_extr_i64_i32(tcg_ctx, rl, rh, temp); + + tcg_temp_free_i64(tcg_ctx, temp); +} + +static inline void +gen_dvinit_b(DisasContext *ctx, TCGv rl, TCGv rh, TCGv r1, TCGv r2) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv_i64 ret = tcg_temp_new_i64(tcg_ctx); + + if (!has_feature(ctx, TRICORE_FEATURE_131)) { + gen_helper_dvinit_b_13(tcg_ctx, ret, tcg_ctx->cpu_env, r1, r2); + } else { + gen_helper_dvinit_b_131(tcg_ctx, ret, tcg_ctx->cpu_env, r1, r2); + } + tcg_gen_extr_i64_i32(tcg_ctx, rl, rh, ret); + + tcg_temp_free_i64(tcg_ctx, ret); +} + +static inline void +gen_dvinit_h(DisasContext *ctx, TCGv rl, TCGv rh, TCGv r1, TCGv r2) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv_i64 ret = tcg_temp_new_i64(tcg_ctx); + + if (!has_feature(ctx, 
TRICORE_FEATURE_131)) { + gen_helper_dvinit_h_13(tcg_ctx, ret, tcg_ctx->cpu_env, r1, r2); + } else { + gen_helper_dvinit_h_131(tcg_ctx, ret, tcg_ctx->cpu_env, r1, r2); + } + tcg_gen_extr_i64_i32(tcg_ctx, rl, rh, ret); + + tcg_temp_free_i64(tcg_ctx, ret); +} + +static void gen_calc_usb_mul_h(DisasContext *ctx, TCGv arg_low, TCGv arg_high) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_temp_new(tcg_ctx); + /* calc AV bit */ + tcg_gen_add_tl(tcg_ctx, temp, arg_low, arg_low); + tcg_gen_xor_tl(tcg_ctx, temp, temp, arg_low); + tcg_gen_add_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, arg_high, arg_high); + tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, tcg_ctx->cpu_PSW_AV, arg_high); + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, tcg_ctx->cpu_PSW_AV, temp); + /* calc SAV bit */ + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_AV); + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, 0); + tcg_temp_free(tcg_ctx, temp); +} + +static void gen_calc_usb_mulr_h(DisasContext *ctx, TCGv arg) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_temp_new(tcg_ctx); + /* calc AV bit */ + tcg_gen_add_tl(tcg_ctx, temp, arg, arg); + tcg_gen_xor_tl(tcg_ctx, temp, temp, arg); + tcg_gen_shli_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, temp, 16); + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, tcg_ctx->cpu_PSW_AV, temp); + /* calc SAV bit */ + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_AV); + /* clear V bit */ + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, 0); + tcg_temp_free(tcg_ctx, temp); +} + +/* helpers for generating program flow micro-ops */ + +static inline void gen_save_pc(DisasContext *ctx, target_ulong pc) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_PC, pc); +} + +static inline bool use_goto_tb(DisasContext *ctx, target_ulong dest) +{ + if (unlikely(ctx->base.singlestep_enabled)) { + return false; + } + + return (ctx->base.tb->pc & TARGET_PAGE_MASK) == (dest & 
TARGET_PAGE_MASK); +} + +static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + // if (translator_use_goto_tb(&ctx->base, dest)) { + if (use_goto_tb(ctx, dest)) { + tcg_gen_goto_tb(tcg_ctx, n); + gen_save_pc(ctx, dest); + tcg_gen_exit_tb(tcg_ctx, ctx->base.tb, n); + } else { + gen_save_pc(ctx, dest); + tcg_gen_lookup_and_goto_ptr(tcg_ctx); + } +} + +static void generate_trap(DisasContext *ctx, int class, int tin) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv_i32 classtemp = tcg_const_i32(tcg_ctx, class); + TCGv_i32 tintemp = tcg_const_i32(tcg_ctx, tin); + + gen_save_pc(ctx, ctx->base.pc_next); + gen_helper_raise_exception_sync(tcg_ctx, tcg_ctx->cpu_env, classtemp, tintemp); + ctx->base.is_jmp = DISAS_NORETURN; + + tcg_temp_free(tcg_ctx, classtemp); + tcg_temp_free(tcg_ctx, tintemp); +} + +static inline void gen_branch_cond(DisasContext *ctx, TCGCond cond, TCGv r1, + TCGv r2, int16_t address) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGLabel *jumpLabel = gen_new_label(tcg_ctx); + tcg_gen_brcond_tl(tcg_ctx, cond, r1, r2, jumpLabel); + + gen_goto_tb(ctx, 1, ctx->pc_succ_insn); + + gen_set_label(tcg_ctx, jumpLabel); + gen_goto_tb(ctx, 0, ctx->base.pc_next + address * 2); +} + +static inline void gen_branch_condi(DisasContext *ctx, TCGCond cond, TCGv r1, + int r2, int16_t address) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_const_i32(tcg_ctx, r2); + gen_branch_cond(ctx, cond, r1, temp, address); + tcg_temp_free(tcg_ctx, temp); +} + +static void gen_loop(DisasContext *ctx, int r1, int32_t offset) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGLabel *l1 = gen_new_label(tcg_ctx); + + tcg_gen_subi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r1], tcg_ctx->cpu_gpr_a[r1], 1); + tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_EQ, tcg_ctx->cpu_gpr_a[r1], -1, l1); + gen_goto_tb(ctx, 1, ctx->base.pc_next + offset); + gen_set_label(tcg_ctx, l1); + gen_goto_tb(ctx, 0, ctx->pc_succ_insn); +} + +static 
void gen_fcall_save_ctx(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_temp_new(tcg_ctx); + + tcg_gen_addi_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_a[10], -4); + tcg_gen_qemu_st_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[11], temp, ctx->mem_idx, MO_LESL); + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[11], ctx->pc_succ_insn); + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[10], temp); + + tcg_temp_free(tcg_ctx, temp); +} + +static void gen_fret(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp = tcg_temp_new(tcg_ctx); + + tcg_gen_andi_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_a[11], ~0x1); + tcg_gen_qemu_ld_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[11], tcg_ctx->cpu_gpr_a[10], ctx->mem_idx, MO_LESL); + tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[10], tcg_ctx->cpu_gpr_a[10], 4); + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_PC, temp); + tcg_gen_exit_tb(tcg_ctx, NULL, 0); + ctx->base.is_jmp = DISAS_NORETURN; + + tcg_temp_free(tcg_ctx, temp); +} + +static void gen_compute_branch(DisasContext *ctx, uint32_t opc, int r1, + int r2 , int32_t constant , int32_t offset) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + TCGv temp, temp2; + int n; + + switch (opc) { +/* SB-format jumps */ + case OPC1_16_SB_J: + case OPC1_32_B_J: + gen_goto_tb(ctx, 0, ctx->base.pc_next + offset * 2); + break; + case OPC1_32_B_CALL: + case OPC1_16_SB_CALL: + gen_helper_1arg(tcg_ctx, call, ctx->pc_succ_insn); + gen_goto_tb(ctx, 0, ctx->base.pc_next + offset * 2); + break; + case OPC1_16_SB_JZ: + gen_branch_condi(ctx, TCG_COND_EQ, tcg_ctx->cpu_gpr_d[15], 0, offset); + break; + case OPC1_16_SB_JNZ: + gen_branch_condi(ctx, TCG_COND_NE, tcg_ctx->cpu_gpr_d[15], 0, offset); + break; +/* SBC-format jumps */ + case OPC1_16_SBC_JEQ: + gen_branch_condi(ctx, TCG_COND_EQ, tcg_ctx->cpu_gpr_d[15], constant, offset); + break; + case OPC1_16_SBC_JEQ2: + gen_branch_condi(ctx, TCG_COND_EQ, tcg_ctx->cpu_gpr_d[15], constant, + offset + 16); + break; + case OPC1_16_SBC_JNE: + gen_branch_condi(ctx, 
TCG_COND_NE, tcg_ctx->cpu_gpr_d[15], constant, offset); + break; + case OPC1_16_SBC_JNE2: + gen_branch_condi(ctx, TCG_COND_NE, tcg_ctx->cpu_gpr_d[15], + constant, offset + 16); + break; +/* SBRN-format jumps */ + case OPC1_16_SBRN_JZ_T: + temp = tcg_temp_new(tcg_ctx); + tcg_gen_andi_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[15], 0x1u << constant); + gen_branch_condi(ctx, TCG_COND_EQ, temp, 0, offset); + tcg_temp_free(tcg_ctx, temp); + break; + case OPC1_16_SBRN_JNZ_T: + temp = tcg_temp_new(tcg_ctx); + tcg_gen_andi_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[15], 0x1u << constant); + gen_branch_condi(ctx, TCG_COND_NE, temp, 0, offset); + tcg_temp_free(tcg_ctx, temp); + break; +/* SBR-format jumps */ + case OPC1_16_SBR_JEQ: + gen_branch_cond(ctx, TCG_COND_EQ, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[15], + offset); + break; + case OPC1_16_SBR_JEQ2: + gen_branch_cond(ctx, TCG_COND_EQ, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[15], + offset + 16); + break; + case OPC1_16_SBR_JNE: + gen_branch_cond(ctx, TCG_COND_NE, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[15], + offset); + break; + case OPC1_16_SBR_JNE2: + gen_branch_cond(ctx, TCG_COND_NE, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[15], + offset + 16); + break; + case OPC1_16_SBR_JNZ: + gen_branch_condi(ctx, TCG_COND_NE, tcg_ctx->cpu_gpr_d[r1], 0, offset); + break; + case OPC1_16_SBR_JNZ_A: + gen_branch_condi(ctx, TCG_COND_NE, tcg_ctx->cpu_gpr_a[r1], 0, offset); + break; + case OPC1_16_SBR_JGEZ: + gen_branch_condi(ctx, TCG_COND_GE, tcg_ctx->cpu_gpr_d[r1], 0, offset); + break; + case OPC1_16_SBR_JGTZ: + gen_branch_condi(ctx, TCG_COND_GT, tcg_ctx->cpu_gpr_d[r1], 0, offset); + break; + case OPC1_16_SBR_JLEZ: + gen_branch_condi(ctx, TCG_COND_LE, tcg_ctx->cpu_gpr_d[r1], 0, offset); + break; + case OPC1_16_SBR_JLTZ: + gen_branch_condi(ctx, TCG_COND_LT, tcg_ctx->cpu_gpr_d[r1], 0, offset); + break; + case OPC1_16_SBR_JZ: + gen_branch_condi(ctx, TCG_COND_EQ, tcg_ctx->cpu_gpr_d[r1], 0, offset); + break; + case OPC1_16_SBR_JZ_A: + 
gen_branch_condi(ctx, TCG_COND_EQ, tcg_ctx->cpu_gpr_a[r1], 0, offset); + break; + case OPC1_16_SBR_LOOP: + gen_loop(ctx, r1, offset * 2 - 32); + break; +/* SR-format jumps */ + case OPC1_16_SR_JI: + tcg_gen_andi_tl(tcg_ctx, tcg_ctx->cpu_PC, tcg_ctx->cpu_gpr_a[r1], 0xfffffffe); + tcg_gen_exit_tb(tcg_ctx, NULL, 0); + break; + case OPC2_32_SYS_RET: + case OPC2_16_SR_RET: + gen_helper_ret(tcg_ctx, tcg_ctx->cpu_env); + tcg_gen_exit_tb(tcg_ctx, NULL, 0); + break; +/* B-format */ + case OPC1_32_B_CALLA: + gen_helper_1arg(tcg_ctx, call, ctx->pc_succ_insn); + gen_goto_tb(ctx, 0, EA_B_ABSOLUT(offset)); + break; + case OPC1_32_B_FCALL: + gen_fcall_save_ctx(ctx); + gen_goto_tb(ctx, 0, ctx->base.pc_next + offset * 2); + break; + case OPC1_32_B_FCALLA: + gen_fcall_save_ctx(ctx); + gen_goto_tb(ctx, 0, EA_B_ABSOLUT(offset)); + break; + case OPC1_32_B_JLA: + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[11], ctx->pc_succ_insn); + /* fall through */ + case OPC1_32_B_JA: + gen_goto_tb(ctx, 0, EA_B_ABSOLUT(offset)); + break; + case OPC1_32_B_JL: + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[11], ctx->pc_succ_insn); + gen_goto_tb(ctx, 0, ctx->base.pc_next + offset * 2); + break; +/* BOL format */ + case OPCM_32_BRC_EQ_NEQ: + if (MASK_OP_BRC_OP2(ctx->opcode) == OPC2_32_BRC_JEQ) { + gen_branch_condi(ctx, TCG_COND_EQ, tcg_ctx->cpu_gpr_d[r1], constant, offset); + } else { + gen_branch_condi(ctx, TCG_COND_NE, tcg_ctx->cpu_gpr_d[r1], constant, offset); + } + break; + case OPCM_32_BRC_GE: + if (MASK_OP_BRC_OP2(ctx->opcode) == OP2_32_BRC_JGE) { + gen_branch_condi(ctx, TCG_COND_GE, tcg_ctx->cpu_gpr_d[r1], constant, offset); + } else { + constant = MASK_OP_BRC_CONST4(ctx->opcode); + gen_branch_condi(ctx, TCG_COND_GEU, tcg_ctx->cpu_gpr_d[r1], constant, + offset); + } + break; + case OPCM_32_BRC_JLT: + if (MASK_OP_BRC_OP2(ctx->opcode) == OPC2_32_BRC_JLT) { + gen_branch_condi(ctx, TCG_COND_LT, tcg_ctx->cpu_gpr_d[r1], constant, offset); + } else { + constant = MASK_OP_BRC_CONST4(ctx->opcode); + 
gen_branch_condi(ctx, TCG_COND_LTU, tcg_ctx->cpu_gpr_d[r1], constant, + offset); + } + break; + case OPCM_32_BRC_JNE: + temp = tcg_temp_new(tcg_ctx); + if (MASK_OP_BRC_OP2(ctx->opcode) == OPC2_32_BRC_JNED) { + tcg_gen_mov_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r1]); + /* subi is unconditional */ + tcg_gen_subi_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r1], 1); + gen_branch_condi(ctx, TCG_COND_NE, temp, constant, offset); + } else { + tcg_gen_mov_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r1]); + /* addi is unconditional */ + tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r1], 1); + gen_branch_condi(ctx, TCG_COND_NE, temp, constant, offset); + } + tcg_temp_free(tcg_ctx, temp); + break; +/* BRN format */ + case OPCM_32_BRN_JTT: + n = MASK_OP_BRN_N(ctx->opcode); + + temp = tcg_temp_new(tcg_ctx); + tcg_gen_andi_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r1], (1 << n)); + + if (MASK_OP_BRN_OP2(ctx->opcode) == OPC2_32_BRN_JNZ_T) { + gen_branch_condi(ctx, TCG_COND_NE, temp, 0, offset); + } else { + gen_branch_condi(ctx, TCG_COND_EQ, temp, 0, offset); + } + tcg_temp_free(tcg_ctx, temp); + break; +/* BRR Format */ + case OPCM_32_BRR_EQ_NEQ: + if (MASK_OP_BRR_OP2(ctx->opcode) == OPC2_32_BRR_JEQ) { + gen_branch_cond(ctx, TCG_COND_EQ, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], + offset); + } else { + gen_branch_cond(ctx, TCG_COND_NE, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], + offset); + } + break; + case OPCM_32_BRR_ADDR_EQ_NEQ: + if (MASK_OP_BRR_OP2(ctx->opcode) == OPC2_32_BRR_JEQ_A) { + gen_branch_cond(ctx, TCG_COND_EQ, tcg_ctx->cpu_gpr_a[r1], tcg_ctx->cpu_gpr_a[r2], + offset); + } else { + gen_branch_cond(ctx, TCG_COND_NE, tcg_ctx->cpu_gpr_a[r1], tcg_ctx->cpu_gpr_a[r2], + offset); + } + break; + case OPCM_32_BRR_GE: + if (MASK_OP_BRR_OP2(ctx->opcode) == OPC2_32_BRR_JGE) { + gen_branch_cond(ctx, TCG_COND_GE, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], + offset); + } else { + gen_branch_cond(ctx, TCG_COND_GEU, tcg_ctx->cpu_gpr_d[r1], 
tcg_ctx->cpu_gpr_d[r2], + offset); + } + break; + case OPCM_32_BRR_JLT: + if (MASK_OP_BRR_OP2(ctx->opcode) == OPC2_32_BRR_JLT) { + gen_branch_cond(ctx, TCG_COND_LT, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], + offset); + } else { + gen_branch_cond(ctx, TCG_COND_LTU, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], + offset); + } + break; + case OPCM_32_BRR_LOOP: + if (MASK_OP_BRR_OP2(ctx->opcode) == OPC2_32_BRR_LOOP) { + gen_loop(ctx, r2, offset * 2); + } else { + /* OPC2_32_BRR_LOOPU */ + gen_goto_tb(ctx, 0, ctx->base.pc_next + offset * 2); + } + break; + case OPCM_32_BRR_JNE: + temp = tcg_temp_new(tcg_ctx); + temp2 = tcg_temp_new(tcg_ctx); + if (MASK_OP_BRC_OP2(ctx->opcode) == OPC2_32_BRR_JNED) { + tcg_gen_mov_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r1]); + /* also save r2, in case of r1 == r2, so r2 is not decremented */ + tcg_gen_mov_tl(tcg_ctx, temp2, tcg_ctx->cpu_gpr_d[r2]); + /* subi is unconditional */ + tcg_gen_subi_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r1], 1); + gen_branch_cond(ctx, TCG_COND_NE, temp, temp2, offset); + } else { + tcg_gen_mov_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r1]); + /* also save r2, in case of r1 == r2, so r2 is not decremented */ + tcg_gen_mov_tl(tcg_ctx, temp2, tcg_ctx->cpu_gpr_d[r2]); + /* addi is unconditional */ + tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r1], 1); + gen_branch_cond(ctx, TCG_COND_NE, temp, temp2, offset); + } + tcg_temp_free(tcg_ctx, temp); + tcg_temp_free(tcg_ctx, temp2); + break; + case OPCM_32_BRR_JNZ: + if (MASK_OP_BRR_OP2(ctx->opcode) == OPC2_32_BRR_JNZ_A) { + gen_branch_condi(ctx, TCG_COND_NE, tcg_ctx->cpu_gpr_a[r1], 0, offset); + } else { + gen_branch_condi(ctx, TCG_COND_EQ, tcg_ctx->cpu_gpr_a[r1], 0, offset); + } + break; + default: + generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); + } + ctx->base.is_jmp = DISAS_NORETURN; +} + + +/* + * Functions for decoding instructions + */ + +static void decode_src_opc(DisasContext *ctx, int op1) +{ + TCGContext *tcg_ctx = 
ctx->uc->tcg_ctx; + + int r1; + int32_t const4; + TCGv temp, temp2; + + r1 = MASK_OP_SRC_S1D(ctx->opcode); + const4 = MASK_OP_SRC_CONST4_SEXT(ctx->opcode); + + switch (op1) { + case OPC1_16_SRC_ADD: + gen_addi_d(ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r1], const4); + break; + case OPC1_16_SRC_ADD_A15: + gen_addi_d(ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[15], const4); + break; + case OPC1_16_SRC_ADD_15A: + gen_addi_d(ctx, tcg_ctx->cpu_gpr_d[15], tcg_ctx->cpu_gpr_d[r1], const4); + break; + case OPC1_16_SRC_ADD_A: + tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r1], tcg_ctx->cpu_gpr_a[r1], const4); + break; + case OPC1_16_SRC_CADD: + gen_condi_add(ctx, TCG_COND_NE, tcg_ctx->cpu_gpr_d[r1], const4, tcg_ctx->cpu_gpr_d[r1], + tcg_ctx->cpu_gpr_d[15]); + break; + case OPC1_16_SRC_CADDN: + gen_condi_add(ctx, TCG_COND_EQ, tcg_ctx->cpu_gpr_d[r1], const4, tcg_ctx->cpu_gpr_d[r1], + tcg_ctx->cpu_gpr_d[15]); + break; + case OPC1_16_SRC_CMOV: + temp = tcg_const_tl(tcg_ctx, 0); + temp2 = tcg_const_tl(tcg_ctx, const4); + tcg_gen_movcond_tl(tcg_ctx, TCG_COND_NE, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[15], temp, + temp2, tcg_ctx->cpu_gpr_d[r1]); + tcg_temp_free(tcg_ctx, temp); + tcg_temp_free(tcg_ctx, temp2); + break; + case OPC1_16_SRC_CMOVN: + temp = tcg_const_tl(tcg_ctx, 0); + temp2 = tcg_const_tl(tcg_ctx, const4); + tcg_gen_movcond_tl(tcg_ctx, TCG_COND_EQ, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[15], temp, + temp2, tcg_ctx->cpu_gpr_d[r1]); + tcg_temp_free(tcg_ctx, temp); + tcg_temp_free(tcg_ctx, temp2); + break; + case OPC1_16_SRC_EQ: + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, tcg_ctx->cpu_gpr_d[15], tcg_ctx->cpu_gpr_d[r1], + const4); + break; + case OPC1_16_SRC_LT: + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_LT, tcg_ctx->cpu_gpr_d[15], tcg_ctx->cpu_gpr_d[r1], + const4); + break; + case OPC1_16_SRC_MOV: + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], const4); + break; + case OPC1_16_SRC_MOV_A: + const4 = MASK_OP_SRC_CONST4(ctx->opcode); + 
tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r1], const4); + break; + case OPC1_16_SRC_MOV_E: + if (has_feature(ctx, TRICORE_FEATURE_16)) { + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], const4); + tcg_gen_sari_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1+1], tcg_ctx->cpu_gpr_d[r1], 31); + } else { + generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); + } + break; + case OPC1_16_SRC_SH: + gen_shi(ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r1], const4); + break; + case OPC1_16_SRC_SHA: + gen_shaci(ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r1], const4); + break; + default: + generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); + } +} + +static void decode_srr_opc(DisasContext *ctx, int op1) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + int r1, r2; + TCGv temp; + + r1 = MASK_OP_SRR_S1D(ctx->opcode); + r2 = MASK_OP_SRR_S2(ctx->opcode); + + switch (op1) { + case OPC1_16_SRR_ADD: + gen_add_d(ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC1_16_SRR_ADD_A15: + gen_add_d(ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[15], tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC1_16_SRR_ADD_15A: + gen_add_d(ctx, tcg_ctx->cpu_gpr_d[15], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC1_16_SRR_ADD_A: + tcg_gen_add_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r1], tcg_ctx->cpu_gpr_a[r1], tcg_ctx->cpu_gpr_a[r2]); + break; + case OPC1_16_SRR_ADDS: + gen_adds(ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC1_16_SRR_AND: + tcg_gen_and_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC1_16_SRR_CMOV: + temp = tcg_const_tl(tcg_ctx, 0); + tcg_gen_movcond_tl(tcg_ctx, TCG_COND_NE, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[15], temp, + tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1]); + tcg_temp_free(tcg_ctx, temp); + break; + case OPC1_16_SRR_CMOVN: + temp = tcg_const_tl(tcg_ctx, 0); + tcg_gen_movcond_tl(tcg_ctx, TCG_COND_EQ, 
tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[15], temp, + tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1]); + tcg_temp_free(tcg_ctx, temp); + break; + case OPC1_16_SRR_EQ: + tcg_gen_setcond_tl(tcg_ctx, TCG_COND_EQ, tcg_ctx->cpu_gpr_d[15], tcg_ctx->cpu_gpr_d[r1], + tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC1_16_SRR_LT: + tcg_gen_setcond_tl(tcg_ctx, TCG_COND_LT, tcg_ctx->cpu_gpr_d[15], tcg_ctx->cpu_gpr_d[r1], + tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC1_16_SRR_MOV: + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC1_16_SRR_MOV_A: + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r1], tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC1_16_SRR_MOV_AA: + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r1], tcg_ctx->cpu_gpr_a[r2]); + break; + case OPC1_16_SRR_MOV_D: + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[r2]); + break; + case OPC1_16_SRR_MUL: + gen_mul_i32s(ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC1_16_SRR_OR: + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC1_16_SRR_SUB: + gen_sub_d(ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC1_16_SRR_SUB_A15B: + gen_sub_d(ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[15], tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC1_16_SRR_SUB_15AB: + gen_sub_d(ctx, tcg_ctx->cpu_gpr_d[15], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC1_16_SRR_SUBS: + gen_subs(ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC1_16_SRR_XOR: + tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); + break; + default: + generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); + } +} + +static void decode_ssr_opc(DisasContext *ctx, int op1) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + int r1, r2; + + r1 = MASK_OP_SSR_S1(ctx->opcode); 
+ r2 = MASK_OP_SSR_S2(ctx->opcode); + + switch (op1) { + case OPC1_16_SSR_ST_A: + tcg_gen_qemu_st_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r1], tcg_ctx->cpu_gpr_a[r2], ctx->mem_idx, MO_LEUL); + break; + case OPC1_16_SSR_ST_A_POSTINC: + tcg_gen_qemu_st_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r1], tcg_ctx->cpu_gpr_a[r2], ctx->mem_idx, MO_LEUL); + tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r2], tcg_ctx->cpu_gpr_a[r2], 4); + break; + case OPC1_16_SSR_ST_B: + tcg_gen_qemu_st_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[r2], ctx->mem_idx, MO_UB); + break; + case OPC1_16_SSR_ST_B_POSTINC: + tcg_gen_qemu_st_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[r2], ctx->mem_idx, MO_UB); + tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r2], tcg_ctx->cpu_gpr_a[r2], 1); + break; + case OPC1_16_SSR_ST_H: + tcg_gen_qemu_st_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[r2], ctx->mem_idx, MO_LEUW); + break; + case OPC1_16_SSR_ST_H_POSTINC: + tcg_gen_qemu_st_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[r2], ctx->mem_idx, MO_LEUW); + tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r2], tcg_ctx->cpu_gpr_a[r2], 2); + break; + case OPC1_16_SSR_ST_W: + tcg_gen_qemu_st_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[r2], ctx->mem_idx, MO_LEUL); + break; + case OPC1_16_SSR_ST_W_POSTINC: + tcg_gen_qemu_st_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[r2], ctx->mem_idx, MO_LEUL); + tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r2], tcg_ctx->cpu_gpr_a[r2], 4); + break; + default: + generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); + } +} + +static void decode_sc_opc(DisasContext *ctx, int op1) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + int32_t const16; + + const16 = MASK_OP_SC_CONST8(ctx->opcode); + + switch (op1) { + case OPC1_16_SC_AND: + tcg_gen_andi_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[15], tcg_ctx->cpu_gpr_d[15], const16); + break; + case OPC1_16_SC_BISR: + gen_helper_1arg(tcg_ctx, bisr, const16 & 0xff); + break; + case OPC1_16_SC_LD_A: + gen_offset_ld(ctx, 
tcg_ctx->cpu_gpr_a[15], tcg_ctx->cpu_gpr_a[10], const16 * 4, MO_LESL); + break; + case OPC1_16_SC_LD_W: + gen_offset_ld(ctx, tcg_ctx->cpu_gpr_d[15], tcg_ctx->cpu_gpr_a[10], const16 * 4, MO_LESL); + break; + case OPC1_16_SC_MOV: + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[15], const16); + break; + case OPC1_16_SC_OR: + tcg_gen_ori_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[15], tcg_ctx->cpu_gpr_d[15], const16); + break; + case OPC1_16_SC_ST_A: + gen_offset_st(ctx, tcg_ctx->cpu_gpr_a[15], tcg_ctx->cpu_gpr_a[10], const16 * 4, MO_LESL); + break; + case OPC1_16_SC_ST_W: + gen_offset_st(ctx, tcg_ctx->cpu_gpr_d[15], tcg_ctx->cpu_gpr_a[10], const16 * 4, MO_LESL); + break; + case OPC1_16_SC_SUB_A: + tcg_gen_subi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[10], tcg_ctx->cpu_gpr_a[10], const16); + break; + default: + generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); + } +} + +static void decode_slr_opc(DisasContext *ctx, int op1) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + int r1, r2; + + r1 = MASK_OP_SLR_D(ctx->opcode); + r2 = MASK_OP_SLR_S2(ctx->opcode); + + switch (op1) { +/* SLR-format */ + case OPC1_16_SLR_LD_A: + tcg_gen_qemu_ld_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r1], tcg_ctx->cpu_gpr_a[r2], ctx->mem_idx, MO_LESL); + break; + case OPC1_16_SLR_LD_A_POSTINC: + tcg_gen_qemu_ld_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r1], tcg_ctx->cpu_gpr_a[r2], ctx->mem_idx, MO_LESL); + tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r2], tcg_ctx->cpu_gpr_a[r2], 4); + break; + case OPC1_16_SLR_LD_BU: + tcg_gen_qemu_ld_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[r2], ctx->mem_idx, MO_UB); + break; + case OPC1_16_SLR_LD_BU_POSTINC: + tcg_gen_qemu_ld_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[r2], ctx->mem_idx, MO_UB); + tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r2], tcg_ctx->cpu_gpr_a[r2], 1); + break; + case OPC1_16_SLR_LD_H: + tcg_gen_qemu_ld_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[r2], ctx->mem_idx, MO_LESW); + break; + case OPC1_16_SLR_LD_H_POSTINC: + tcg_gen_qemu_ld_tl(tcg_ctx, 
tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[r2], ctx->mem_idx, MO_LESW); + tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r2], tcg_ctx->cpu_gpr_a[r2], 2); + break; + case OPC1_16_SLR_LD_W: + tcg_gen_qemu_ld_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[r2], ctx->mem_idx, MO_LESL); + break; + case OPC1_16_SLR_LD_W_POSTINC: + tcg_gen_qemu_ld_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[r2], ctx->mem_idx, MO_LESL); + tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r2], tcg_ctx->cpu_gpr_a[r2], 4); + break; + default: + generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); + } +} + +static void decode_sro_opc(DisasContext *ctx, int op1) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + int r2; + int32_t address; + + r2 = MASK_OP_SRO_S2(ctx->opcode); + address = MASK_OP_SRO_OFF4(ctx->opcode); + +/* SRO-format */ + switch (op1) { + case OPC1_16_SRO_LD_A: + gen_offset_ld(ctx, tcg_ctx->cpu_gpr_a[15], tcg_ctx->cpu_gpr_a[r2], address * 4, MO_LESL); + break; + case OPC1_16_SRO_LD_BU: + gen_offset_ld(ctx, tcg_ctx->cpu_gpr_d[15], tcg_ctx->cpu_gpr_a[r2], address, MO_UB); + break; + case OPC1_16_SRO_LD_H: + gen_offset_ld(ctx, tcg_ctx->cpu_gpr_d[15], tcg_ctx->cpu_gpr_a[r2], address, MO_LESW); + break; + case OPC1_16_SRO_LD_W: + gen_offset_ld(ctx, tcg_ctx->cpu_gpr_d[15], tcg_ctx->cpu_gpr_a[r2], address * 4, MO_LESL); + break; + case OPC1_16_SRO_ST_A: + gen_offset_st(ctx, tcg_ctx->cpu_gpr_a[15], tcg_ctx->cpu_gpr_a[r2], address * 4, MO_LESL); + break; + case OPC1_16_SRO_ST_B: + gen_offset_st(ctx, tcg_ctx->cpu_gpr_d[15], tcg_ctx->cpu_gpr_a[r2], address, MO_UB); + break; + case OPC1_16_SRO_ST_H: + gen_offset_st(ctx, tcg_ctx->cpu_gpr_d[15], tcg_ctx->cpu_gpr_a[r2], address * 2, MO_LESW); + break; + case OPC1_16_SRO_ST_W: + gen_offset_st(ctx, tcg_ctx->cpu_gpr_d[15], tcg_ctx->cpu_gpr_a[r2], address * 4, MO_LESL); + break; + default: + generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); + } +} + +static void decode_sr_system(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + 
uint32_t op2; + op2 = MASK_OP_SR_OP2(ctx->opcode); + + switch (op2) { + case OPC2_16_SR_NOP: + break; + case OPC2_16_SR_RET: + gen_compute_branch(ctx, op2, 0, 0, 0, 0); + break; + case OPC2_16_SR_RFE: + gen_helper_rfe(tcg_ctx, tcg_ctx->cpu_env); + tcg_gen_exit_tb(tcg_ctx, NULL, 0); + ctx->base.is_jmp = DISAS_NORETURN; + break; + case OPC2_16_SR_DEBUG: + /* raise EXCP_DEBUG */ + break; + case OPC2_16_SR_FRET: + gen_fret(ctx); + break; + default: + generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); + } +} + +static void decode_sr_accu(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + uint32_t op2; + uint32_t r1; + TCGv temp; + + r1 = MASK_OP_SR_S1D(ctx->opcode); + op2 = MASK_OP_SR_OP2(ctx->opcode); + + switch (op2) { + case OPC2_16_SR_RSUB: + /* overflow only if r1 = -0x80000000 */ + temp = tcg_const_i32(tcg_ctx, -0x80000000); + /* calc V bit */ + tcg_gen_setcond_tl(tcg_ctx, TCG_COND_EQ, tcg_ctx->cpu_PSW_V, tcg_ctx->cpu_gpr_d[r1], temp); + tcg_gen_shli_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, tcg_ctx->cpu_PSW_V, 31); + /* calc SV bit */ + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_V); + /* sub */ + tcg_gen_neg_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r1]); + /* calc av */ + tcg_gen_add_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r1]); + tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_PSW_AV); + /* calc sav */ + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_AV); + tcg_temp_free(tcg_ctx, temp); + break; + case OPC2_16_SR_SAT_B: + gen_saturate(ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r1], 0x7f, -0x80); + break; + case OPC2_16_SR_SAT_BU: + gen_saturate_u(ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r1], 0xff); + break; + case OPC2_16_SR_SAT_H: + gen_saturate(ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r1], 0x7fff, -0x8000); + break; + case OPC2_16_SR_SAT_HU: + gen_saturate_u(ctx, 
tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r1], 0xffff); + break; + default: + generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); + } +} + +static void decode_16Bit_opc(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + int op1; + int r1, r2; + int32_t const16; + int32_t address; + TCGv temp; + + op1 = MASK_OP_MAJOR(ctx->opcode); + + /* handle ADDSC.A opcode only being 6 bit long */ + if (unlikely((op1 & 0x3f) == OPC1_16_SRRS_ADDSC_A)) { + op1 = OPC1_16_SRRS_ADDSC_A; + } + + switch (op1) { + case OPC1_16_SRC_ADD: + case OPC1_16_SRC_ADD_A15: + case OPC1_16_SRC_ADD_15A: + case OPC1_16_SRC_ADD_A: + case OPC1_16_SRC_CADD: + case OPC1_16_SRC_CADDN: + case OPC1_16_SRC_CMOV: + case OPC1_16_SRC_CMOVN: + case OPC1_16_SRC_EQ: + case OPC1_16_SRC_LT: + case OPC1_16_SRC_MOV: + case OPC1_16_SRC_MOV_A: + case OPC1_16_SRC_MOV_E: + case OPC1_16_SRC_SH: + case OPC1_16_SRC_SHA: + decode_src_opc(ctx, op1); + break; +/* SRR-format */ + case OPC1_16_SRR_ADD: + case OPC1_16_SRR_ADD_A15: + case OPC1_16_SRR_ADD_15A: + case OPC1_16_SRR_ADD_A: + case OPC1_16_SRR_ADDS: + case OPC1_16_SRR_AND: + case OPC1_16_SRR_CMOV: + case OPC1_16_SRR_CMOVN: + case OPC1_16_SRR_EQ: + case OPC1_16_SRR_LT: + case OPC1_16_SRR_MOV: + case OPC1_16_SRR_MOV_A: + case OPC1_16_SRR_MOV_AA: + case OPC1_16_SRR_MOV_D: + case OPC1_16_SRR_MUL: + case OPC1_16_SRR_OR: + case OPC1_16_SRR_SUB: + case OPC1_16_SRR_SUB_A15B: + case OPC1_16_SRR_SUB_15AB: + case OPC1_16_SRR_SUBS: + case OPC1_16_SRR_XOR: + decode_srr_opc(ctx, op1); + break; +/* SSR-format */ + case OPC1_16_SSR_ST_A: + case OPC1_16_SSR_ST_A_POSTINC: + case OPC1_16_SSR_ST_B: + case OPC1_16_SSR_ST_B_POSTINC: + case OPC1_16_SSR_ST_H: + case OPC1_16_SSR_ST_H_POSTINC: + case OPC1_16_SSR_ST_W: + case OPC1_16_SSR_ST_W_POSTINC: + decode_ssr_opc(ctx, op1); + break; +/* SRRS-format */ + case OPC1_16_SRRS_ADDSC_A: + r2 = MASK_OP_SRRS_S2(ctx->opcode); + r1 = MASK_OP_SRRS_S1D(ctx->opcode); + const16 = MASK_OP_SRRS_N(ctx->opcode); + temp = tcg_temp_new(tcg_ctx); + 
tcg_gen_shli_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[15], const16); + tcg_gen_add_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r1], tcg_ctx->cpu_gpr_a[r2], temp); + tcg_temp_free(tcg_ctx, temp); + break; +/* SLRO-format */ + case OPC1_16_SLRO_LD_A: + r1 = MASK_OP_SLRO_D(ctx->opcode); + const16 = MASK_OP_SLRO_OFF4(ctx->opcode); + gen_offset_ld(ctx, tcg_ctx->cpu_gpr_a[r1], tcg_ctx->cpu_gpr_a[15], const16 * 4, MO_LESL); + break; + case OPC1_16_SLRO_LD_BU: + r1 = MASK_OP_SLRO_D(ctx->opcode); + const16 = MASK_OP_SLRO_OFF4(ctx->opcode); + gen_offset_ld(ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[15], const16, MO_UB); + break; + case OPC1_16_SLRO_LD_H: + r1 = MASK_OP_SLRO_D(ctx->opcode); + const16 = MASK_OP_SLRO_OFF4(ctx->opcode); + gen_offset_ld(ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[15], const16 * 2, MO_LESW); + break; + case OPC1_16_SLRO_LD_W: + r1 = MASK_OP_SLRO_D(ctx->opcode); + const16 = MASK_OP_SLRO_OFF4(ctx->opcode); + gen_offset_ld(ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[15], const16 * 4, MO_LESL); + break; +/* SB-format */ + case OPC1_16_SB_CALL: + case OPC1_16_SB_J: + case OPC1_16_SB_JNZ: + case OPC1_16_SB_JZ: + address = MASK_OP_SB_DISP8_SEXT(ctx->opcode); + gen_compute_branch(ctx, op1, 0, 0, 0, address); + break; +/* SBC-format */ + case OPC1_16_SBC_JEQ: + case OPC1_16_SBC_JNE: + address = MASK_OP_SBC_DISP4(ctx->opcode); + const16 = MASK_OP_SBC_CONST4_SEXT(ctx->opcode); + gen_compute_branch(ctx, op1, 0, 0, const16, address); + break; + case OPC1_16_SBC_JEQ2: + case OPC1_16_SBC_JNE2: + if (has_feature(ctx, TRICORE_FEATURE_16)) { + address = MASK_OP_SBC_DISP4(ctx->opcode); + const16 = MASK_OP_SBC_CONST4_SEXT(ctx->opcode); + gen_compute_branch(ctx, op1, 0, 0, const16, address); + } else { + generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); + } + break; +/* SBRN-format */ + case OPC1_16_SBRN_JNZ_T: + case OPC1_16_SBRN_JZ_T: + address = MASK_OP_SBRN_DISP4(ctx->opcode); + const16 = MASK_OP_SBRN_N(ctx->opcode); + gen_compute_branch(ctx, op1, 0, 0, const16, 
address); + break; +/* SBR-format */ + case OPC1_16_SBR_JEQ2: + case OPC1_16_SBR_JNE2: + if (has_feature(ctx, TRICORE_FEATURE_16)) { + r1 = MASK_OP_SBR_S2(ctx->opcode); + address = MASK_OP_SBR_DISP4(ctx->opcode); + gen_compute_branch(ctx, op1, r1, 0, 0, address); + } else { + generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); + } + break; + case OPC1_16_SBR_JEQ: + case OPC1_16_SBR_JGEZ: + case OPC1_16_SBR_JGTZ: + case OPC1_16_SBR_JLEZ: + case OPC1_16_SBR_JLTZ: + case OPC1_16_SBR_JNE: + case OPC1_16_SBR_JNZ: + case OPC1_16_SBR_JNZ_A: + case OPC1_16_SBR_JZ: + case OPC1_16_SBR_JZ_A: + case OPC1_16_SBR_LOOP: + r1 = MASK_OP_SBR_S2(ctx->opcode); + address = MASK_OP_SBR_DISP4(ctx->opcode); + gen_compute_branch(ctx, op1, r1, 0, 0, address); + break; +/* SC-format */ + case OPC1_16_SC_AND: + case OPC1_16_SC_BISR: + case OPC1_16_SC_LD_A: + case OPC1_16_SC_LD_W: + case OPC1_16_SC_MOV: + case OPC1_16_SC_OR: + case OPC1_16_SC_ST_A: + case OPC1_16_SC_ST_W: + case OPC1_16_SC_SUB_A: + decode_sc_opc(ctx, op1); + break; +/* SLR-format */ + case OPC1_16_SLR_LD_A: + case OPC1_16_SLR_LD_A_POSTINC: + case OPC1_16_SLR_LD_BU: + case OPC1_16_SLR_LD_BU_POSTINC: + case OPC1_16_SLR_LD_H: + case OPC1_16_SLR_LD_H_POSTINC: + case OPC1_16_SLR_LD_W: + case OPC1_16_SLR_LD_W_POSTINC: + decode_slr_opc(ctx, op1); + break; +/* SRO-format */ + case OPC1_16_SRO_LD_A: + case OPC1_16_SRO_LD_BU: + case OPC1_16_SRO_LD_H: + case OPC1_16_SRO_LD_W: + case OPC1_16_SRO_ST_A: + case OPC1_16_SRO_ST_B: + case OPC1_16_SRO_ST_H: + case OPC1_16_SRO_ST_W: + decode_sro_opc(ctx, op1); + break; +/* SSRO-format */ + case OPC1_16_SSRO_ST_A: + r1 = MASK_OP_SSRO_S1(ctx->opcode); + const16 = MASK_OP_SSRO_OFF4(ctx->opcode); + gen_offset_st(ctx, tcg_ctx->cpu_gpr_a[r1], tcg_ctx->cpu_gpr_a[15], const16 * 4, MO_LESL); + break; + case OPC1_16_SSRO_ST_B: + r1 = MASK_OP_SSRO_S1(ctx->opcode); + const16 = MASK_OP_SSRO_OFF4(ctx->opcode); + gen_offset_st(ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[15], const16, MO_UB); + break; + case 
OPC1_16_SSRO_ST_H: + r1 = MASK_OP_SSRO_S1(ctx->opcode); + const16 = MASK_OP_SSRO_OFF4(ctx->opcode); + gen_offset_st(ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[15], const16 * 2, MO_LESW); + break; + case OPC1_16_SSRO_ST_W: + r1 = MASK_OP_SSRO_S1(ctx->opcode); + const16 = MASK_OP_SSRO_OFF4(ctx->opcode); + gen_offset_st(ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[15], const16 * 4, MO_LESL); + break; +/* SR-format */ + case OPCM_16_SR_SYSTEM: + decode_sr_system(ctx); + break; + case OPCM_16_SR_ACCU: + decode_sr_accu(ctx); + break; + case OPC1_16_SR_JI: + r1 = MASK_OP_SR_S1D(ctx->opcode); + gen_compute_branch(ctx, op1, r1, 0, 0, 0); + break; + case OPC1_16_SR_NOT: + r1 = MASK_OP_SR_S1D(ctx->opcode); + tcg_gen_not_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r1]); + break; + default: + generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); + } +} + +/* + * 32 bit instructions + */ + +/* ABS-format */ +static void decode_abs_ldw(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + int32_t op2; + int32_t r1; + uint32_t address; + TCGv temp; + + r1 = MASK_OP_ABS_S1D(ctx->opcode); + address = MASK_OP_ABS_OFF18(ctx->opcode); + op2 = MASK_OP_ABS_OP2(ctx->opcode); + + temp = tcg_const_i32(tcg_ctx, EA_ABS_FORMAT(address)); + + switch (op2) { + case OPC2_32_ABS_LD_A: + tcg_gen_qemu_ld_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r1], temp, ctx->mem_idx, MO_LESL); + break; + case OPC2_32_ABS_LD_D: + CHECK_REG_PAIR(r1); + gen_ld_2regs_64(ctx, tcg_ctx->cpu_gpr_d[r1+1], tcg_ctx->cpu_gpr_d[r1], temp); + break; + case OPC2_32_ABS_LD_DA: + CHECK_REG_PAIR(r1); + gen_ld_2regs_64(ctx, tcg_ctx->cpu_gpr_a[r1+1], tcg_ctx->cpu_gpr_a[r1], temp); + break; + case OPC2_32_ABS_LD_W: + tcg_gen_qemu_ld_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LESL); + break; + default: + generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); + } + + tcg_temp_free(tcg_ctx, temp); +} + +static void decode_abs_ldb(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + int32_t op2; + 
int32_t r1; + uint32_t address; + TCGv temp; + + r1 = MASK_OP_ABS_S1D(ctx->opcode); + address = MASK_OP_ABS_OFF18(ctx->opcode); + op2 = MASK_OP_ABS_OP2(ctx->opcode); + + temp = tcg_const_i32(tcg_ctx, EA_ABS_FORMAT(address)); + + switch (op2) { + case OPC2_32_ABS_LD_B: + tcg_gen_qemu_ld_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], temp, ctx->mem_idx, MO_SB); + break; + case OPC2_32_ABS_LD_BU: + tcg_gen_qemu_ld_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], temp, ctx->mem_idx, MO_UB); + break; + case OPC2_32_ABS_LD_H: + tcg_gen_qemu_ld_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LESW); + break; + case OPC2_32_ABS_LD_HU: + tcg_gen_qemu_ld_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LEUW); + break; + default: + generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); + } + + tcg_temp_free(tcg_ctx, temp); +} + +static void decode_abs_ldst_swap(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + int32_t op2; + int32_t r1; + uint32_t address; + TCGv temp; + + r1 = MASK_OP_ABS_S1D(ctx->opcode); + address = MASK_OP_ABS_OFF18(ctx->opcode); + op2 = MASK_OP_ABS_OP2(ctx->opcode); + + temp = tcg_const_i32(tcg_ctx, EA_ABS_FORMAT(address)); + + switch (op2) { + case OPC2_32_ABS_LDMST: + gen_ldmst(ctx, r1, temp); + break; + case OPC2_32_ABS_SWAP_W: + gen_swap(ctx, r1, temp); + break; + default: + generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); + } + + tcg_temp_free(tcg_ctx, temp); +} + +static void decode_abs_ldst_context(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + uint32_t op2; + int32_t off18; + + off18 = MASK_OP_ABS_OFF18(ctx->opcode); + op2 = MASK_OP_ABS_OP2(ctx->opcode); + + switch (op2) { + case OPC2_32_ABS_LDLCX: + gen_helper_1arg(tcg_ctx, ldlcx, EA_ABS_FORMAT(off18)); + break; + case OPC2_32_ABS_LDUCX: + gen_helper_1arg(tcg_ctx, lducx, EA_ABS_FORMAT(off18)); + break; + case OPC2_32_ABS_STLCX: + gen_helper_1arg(tcg_ctx, stlcx, EA_ABS_FORMAT(off18)); + break; + case OPC2_32_ABS_STUCX: + gen_helper_1arg(tcg_ctx, stucx, 
EA_ABS_FORMAT(off18)); + break; + default: + generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); + } +} + +static void decode_abs_store(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + int32_t op2; + int32_t r1; + uint32_t address; + TCGv temp; + + r1 = MASK_OP_ABS_S1D(ctx->opcode); + address = MASK_OP_ABS_OFF18(ctx->opcode); + op2 = MASK_OP_ABS_OP2(ctx->opcode); + + temp = tcg_const_i32(tcg_ctx, EA_ABS_FORMAT(address)); + + switch (op2) { + case OPC2_32_ABS_ST_A: + tcg_gen_qemu_st_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r1], temp, ctx->mem_idx, MO_LESL); + break; + case OPC2_32_ABS_ST_D: + CHECK_REG_PAIR(r1); + gen_st_2regs_64(ctx, tcg_ctx->cpu_gpr_d[r1+1], tcg_ctx->cpu_gpr_d[r1], temp); + break; + case OPC2_32_ABS_ST_DA: + CHECK_REG_PAIR(r1); + gen_st_2regs_64(ctx, tcg_ctx->cpu_gpr_a[r1+1], tcg_ctx->cpu_gpr_a[r1], temp); + break; + case OPC2_32_ABS_ST_W: + tcg_gen_qemu_st_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LESL); + break; + default: + generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); + } + tcg_temp_free(tcg_ctx, temp); +} + +static void decode_abs_storeb_h(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + int32_t op2; + int32_t r1; + uint32_t address; + TCGv temp; + + r1 = MASK_OP_ABS_S1D(ctx->opcode); + address = MASK_OP_ABS_OFF18(ctx->opcode); + op2 = MASK_OP_ABS_OP2(ctx->opcode); + + temp = tcg_const_i32(tcg_ctx, EA_ABS_FORMAT(address)); + + switch (op2) { + case OPC2_32_ABS_ST_B: + tcg_gen_qemu_st_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], temp, ctx->mem_idx, MO_UB); + break; + case OPC2_32_ABS_ST_H: + tcg_gen_qemu_st_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LEUW); + break; + default: + generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); + } + tcg_temp_free(tcg_ctx, temp); +} + +/* Bit-format */ + +static void decode_bit_andacc(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + uint32_t op2; + int r1, r2, r3; + int pos1, pos2; + + r1 = MASK_OP_BIT_S1(ctx->opcode); + r2 = 
MASK_OP_BIT_S2(ctx->opcode); + r3 = MASK_OP_BIT_D(ctx->opcode); + pos1 = MASK_OP_BIT_POS1(ctx->opcode); + pos2 = MASK_OP_BIT_POS2(ctx->opcode); + op2 = MASK_OP_BIT_OP2(ctx->opcode); + + + switch (op2) { + case OPC2_32_BIT_AND_AND_T: + gen_bit_2op(ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], + pos1, pos2, &tcg_gen_and_tl, &tcg_gen_and_tl); + break; + case OPC2_32_BIT_AND_ANDN_T: + gen_bit_2op(ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], + pos1, pos2, &tcg_gen_andc_tl, &tcg_gen_and_tl); + break; + case OPC2_32_BIT_AND_NOR_T: + if (TCG_TARGET_HAS_andc_i32) { + gen_bit_2op(ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], + pos1, pos2, &tcg_gen_or_tl, &tcg_gen_andc_tl); + } else { + gen_bit_2op(ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], + pos1, pos2, &tcg_gen_nor_tl, &tcg_gen_and_tl); + } + break; + case OPC2_32_BIT_AND_OR_T: + gen_bit_2op(ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], + pos1, pos2, &tcg_gen_or_tl, &tcg_gen_and_tl); + break; + default: + generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); + } +} + +static void decode_bit_logical_t(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + uint32_t op2; + int r1, r2, r3; + int pos1, pos2; + r1 = MASK_OP_BIT_S1(ctx->opcode); + r2 = MASK_OP_BIT_S2(ctx->opcode); + r3 = MASK_OP_BIT_D(ctx->opcode); + pos1 = MASK_OP_BIT_POS1(ctx->opcode); + pos2 = MASK_OP_BIT_POS2(ctx->opcode); + op2 = MASK_OP_BIT_OP2(ctx->opcode); + + switch (op2) { + case OPC2_32_BIT_AND_T: + gen_bit_1op(ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], + pos1, pos2, &tcg_gen_and_tl); + break; + case OPC2_32_BIT_ANDN_T: + gen_bit_1op(ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], + pos1, pos2, &tcg_gen_andc_tl); + break; + case OPC2_32_BIT_NOR_T: + gen_bit_1op(ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], 
tcg_ctx->cpu_gpr_d[r2], + pos1, pos2, &tcg_gen_nor_tl); + break; + case OPC2_32_BIT_OR_T: + gen_bit_1op(ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], + pos1, pos2, &tcg_gen_or_tl); + break; + default: + generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); + } +} + +static void decode_bit_insert(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + uint32_t op2; + int r1, r2, r3; + int pos1, pos2; + TCGv temp; + op2 = MASK_OP_BIT_OP2(ctx->opcode); + r1 = MASK_OP_BIT_S1(ctx->opcode); + r2 = MASK_OP_BIT_S2(ctx->opcode); + r3 = MASK_OP_BIT_D(ctx->opcode); + pos1 = MASK_OP_BIT_POS1(ctx->opcode); + pos2 = MASK_OP_BIT_POS2(ctx->opcode); + + temp = tcg_temp_new(tcg_ctx); + + tcg_gen_shri_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r2], pos2); + if (op2 == OPC2_32_BIT_INSN_T) { + tcg_gen_not_tl(tcg_ctx, temp, temp); + } + tcg_gen_deposit_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], temp, pos1, 1); + tcg_temp_free(tcg_ctx, temp); +} + +static void decode_bit_logical_t2(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + uint32_t op2; + + int r1, r2, r3; + int pos1, pos2; + + op2 = MASK_OP_BIT_OP2(ctx->opcode); + r1 = MASK_OP_BIT_S1(ctx->opcode); + r2 = MASK_OP_BIT_S2(ctx->opcode); + r3 = MASK_OP_BIT_D(ctx->opcode); + pos1 = MASK_OP_BIT_POS1(ctx->opcode); + pos2 = MASK_OP_BIT_POS2(ctx->opcode); + + switch (op2) { + case OPC2_32_BIT_NAND_T: + gen_bit_1op(ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], + pos1, pos2, &tcg_gen_nand_tl); + break; + case OPC2_32_BIT_ORN_T: + gen_bit_1op(ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], + pos1, pos2, &tcg_gen_orc_tl); + break; + case OPC2_32_BIT_XNOR_T: + gen_bit_1op(ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], + pos1, pos2, &tcg_gen_eqv_tl); + break; + case OPC2_32_BIT_XOR_T: + gen_bit_1op(ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], + pos1, pos2, 
&tcg_gen_xor_tl); + break; + default: + generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); + } +} + +static void decode_bit_orand(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + uint32_t op2; + + int r1, r2, r3; + int pos1, pos2; + + op2 = MASK_OP_BIT_OP2(ctx->opcode); + r1 = MASK_OP_BIT_S1(ctx->opcode); + r2 = MASK_OP_BIT_S2(ctx->opcode); + r3 = MASK_OP_BIT_D(ctx->opcode); + pos1 = MASK_OP_BIT_POS1(ctx->opcode); + pos2 = MASK_OP_BIT_POS2(ctx->opcode); + + switch (op2) { + case OPC2_32_BIT_OR_AND_T: + gen_bit_2op(ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], + pos1, pos2, &tcg_gen_and_tl, &tcg_gen_or_tl); + break; + case OPC2_32_BIT_OR_ANDN_T: + gen_bit_2op(ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], + pos1, pos2, &tcg_gen_andc_tl, &tcg_gen_or_tl); + break; + case OPC2_32_BIT_OR_NOR_T: + if (TCG_TARGET_HAS_orc_i32) { + gen_bit_2op(ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], + pos1, pos2, &tcg_gen_or_tl, &tcg_gen_orc_tl); + } else { + gen_bit_2op(ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], + pos1, pos2, &tcg_gen_nor_tl, &tcg_gen_or_tl); + } + break; + case OPC2_32_BIT_OR_OR_T: + gen_bit_2op(ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], + pos1, pos2, &tcg_gen_or_tl, &tcg_gen_or_tl); + break; + default: + generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); + } +} + +static void decode_bit_sh_logic1(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + uint32_t op2; + int r1, r2, r3; + int pos1, pos2; + TCGv temp; + + op2 = MASK_OP_BIT_OP2(ctx->opcode); + r1 = MASK_OP_BIT_S1(ctx->opcode); + r2 = MASK_OP_BIT_S2(ctx->opcode); + r3 = MASK_OP_BIT_D(ctx->opcode); + pos1 = MASK_OP_BIT_POS1(ctx->opcode); + pos2 = MASK_OP_BIT_POS2(ctx->opcode); + + temp = tcg_temp_new(tcg_ctx); + + switch (op2) { + case OPC2_32_BIT_SH_AND_T: + gen_bit_1op(ctx, temp, tcg_ctx->cpu_gpr_d[r1], 
tcg_ctx->cpu_gpr_d[r2], + pos1, pos2, &tcg_gen_and_tl); + break; + case OPC2_32_BIT_SH_ANDN_T: + gen_bit_1op(ctx, temp, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], + pos1, pos2, &tcg_gen_andc_tl); + break; + case OPC2_32_BIT_SH_NOR_T: + gen_bit_1op(ctx, temp, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], + pos1, pos2, &tcg_gen_nor_tl); + break; + case OPC2_32_BIT_SH_OR_T: + gen_bit_1op(ctx, temp, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], + pos1, pos2, &tcg_gen_or_tl); + break; + default: + generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); + } + tcg_gen_shli_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3], 1); + tcg_gen_add_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3], temp); + tcg_temp_free(tcg_ctx, temp); +} + +static void decode_bit_sh_logic2(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + uint32_t op2; + int r1, r2, r3; + int pos1, pos2; + TCGv temp; + + op2 = MASK_OP_BIT_OP2(ctx->opcode); + r1 = MASK_OP_BIT_S1(ctx->opcode); + r2 = MASK_OP_BIT_S2(ctx->opcode); + r3 = MASK_OP_BIT_D(ctx->opcode); + pos1 = MASK_OP_BIT_POS1(ctx->opcode); + pos2 = MASK_OP_BIT_POS2(ctx->opcode); + + temp = tcg_temp_new(tcg_ctx); + + switch (op2) { + case OPC2_32_BIT_SH_NAND_T: + gen_bit_1op(ctx, temp, tcg_ctx->cpu_gpr_d[r1] , tcg_ctx->cpu_gpr_d[r2] , + pos1, pos2, &tcg_gen_nand_tl); + break; + case OPC2_32_BIT_SH_ORN_T: + gen_bit_1op(ctx, temp, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], + pos1, pos2, &tcg_gen_orc_tl); + break; + case OPC2_32_BIT_SH_XNOR_T: + gen_bit_1op(ctx, temp, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], + pos1, pos2, &tcg_gen_eqv_tl); + break; + case OPC2_32_BIT_SH_XOR_T: + gen_bit_1op(ctx, temp, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], + pos1, pos2, &tcg_gen_xor_tl); + break; + default: + generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); + } + tcg_gen_shli_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3], 1); + tcg_gen_add_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3], 
temp); + tcg_temp_free(tcg_ctx, temp); +} + +/* BO-format */ + + +static void decode_bo_addrmode_post_pre_base(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + uint32_t op2; + uint32_t off10; + int32_t r1, r2; + TCGv temp; + + r1 = MASK_OP_BO_S1D(ctx->opcode); + r2 = MASK_OP_BO_S2(ctx->opcode); + off10 = MASK_OP_BO_OFF10_SEXT(ctx->opcode); + op2 = MASK_OP_BO_OP2(ctx->opcode); + + switch (op2) { + case OPC2_32_BO_CACHEA_WI_SHORTOFF: + case OPC2_32_BO_CACHEA_W_SHORTOFF: + case OPC2_32_BO_CACHEA_I_SHORTOFF: + /* instruction to access the cache */ + break; + case OPC2_32_BO_CACHEA_WI_POSTINC: + case OPC2_32_BO_CACHEA_W_POSTINC: + case OPC2_32_BO_CACHEA_I_POSTINC: + /* instruction to access the cache, but we still need to handle + the addressing mode */ + tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r2], tcg_ctx->cpu_gpr_a[r2], off10); + break; + case OPC2_32_BO_CACHEA_WI_PREINC: + case OPC2_32_BO_CACHEA_W_PREINC: + case OPC2_32_BO_CACHEA_I_PREINC: + /* instruction to access the cache, but we still need to handle + the addressing mode */ + tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r2], tcg_ctx->cpu_gpr_a[r2], off10); + break; + case OPC2_32_BO_CACHEI_WI_SHORTOFF: + case OPC2_32_BO_CACHEI_W_SHORTOFF: + if (!has_feature(ctx, TRICORE_FEATURE_131)) { + generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); + } + break; + case OPC2_32_BO_CACHEI_W_POSTINC: + case OPC2_32_BO_CACHEI_WI_POSTINC: + if (has_feature(ctx, TRICORE_FEATURE_131)) { + tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r2], tcg_ctx->cpu_gpr_a[r2], off10); + } else { + generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); + } + break; + case OPC2_32_BO_CACHEI_W_PREINC: + case OPC2_32_BO_CACHEI_WI_PREINC: + if (has_feature(ctx, TRICORE_FEATURE_131)) { + tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r2], tcg_ctx->cpu_gpr_a[r2], off10); + } else { + generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); + } + break; + case OPC2_32_BO_ST_A_SHORTOFF: + gen_offset_st(ctx, tcg_ctx->cpu_gpr_a[r1], tcg_ctx->cpu_gpr_a[r2], 
off10, MO_LESL); + break; + case OPC2_32_BO_ST_A_POSTINC: + tcg_gen_qemu_st_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r1], tcg_ctx->cpu_gpr_a[r2], ctx->mem_idx, + MO_LESL); + tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r2], tcg_ctx->cpu_gpr_a[r2], off10); + break; + case OPC2_32_BO_ST_A_PREINC: + gen_st_preincr(ctx, tcg_ctx->cpu_gpr_a[r1], tcg_ctx->cpu_gpr_a[r2], off10, MO_LESL); + break; + case OPC2_32_BO_ST_B_SHORTOFF: + gen_offset_st(ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[r2], off10, MO_UB); + break; + case OPC2_32_BO_ST_B_POSTINC: + tcg_gen_qemu_st_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[r2], ctx->mem_idx, + MO_UB); + tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r2], tcg_ctx->cpu_gpr_a[r2], off10); + break; + case OPC2_32_BO_ST_B_PREINC: + gen_st_preincr(ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[r2], off10, MO_UB); + break; + case OPC2_32_BO_ST_D_SHORTOFF: + CHECK_REG_PAIR(r1); + gen_offset_st_2regs(ctx, tcg_ctx->cpu_gpr_d[r1+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[r2], + off10); + break; + case OPC2_32_BO_ST_D_POSTINC: + CHECK_REG_PAIR(r1); + gen_st_2regs_64(ctx, tcg_ctx->cpu_gpr_d[r1+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[r2]); + tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r2], tcg_ctx->cpu_gpr_a[r2], off10); + break; + case OPC2_32_BO_ST_D_PREINC: + CHECK_REG_PAIR(r1); + temp = tcg_temp_new(tcg_ctx); + tcg_gen_addi_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_a[r2], off10); + gen_st_2regs_64(ctx, tcg_ctx->cpu_gpr_d[r1+1], tcg_ctx->cpu_gpr_d[r1], temp); + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r2], temp); + tcg_temp_free(tcg_ctx, temp); + break; + case OPC2_32_BO_ST_DA_SHORTOFF: + CHECK_REG_PAIR(r1); + gen_offset_st_2regs(ctx, tcg_ctx->cpu_gpr_a[r1+1], tcg_ctx->cpu_gpr_a[r1], tcg_ctx->cpu_gpr_a[r2], + off10); + break; + case OPC2_32_BO_ST_DA_POSTINC: + CHECK_REG_PAIR(r1); + gen_st_2regs_64(ctx, tcg_ctx->cpu_gpr_a[r1+1], tcg_ctx->cpu_gpr_a[r1], tcg_ctx->cpu_gpr_a[r2]); + tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r2], 
tcg_ctx->cpu_gpr_a[r2], off10); + break; + case OPC2_32_BO_ST_DA_PREINC: + CHECK_REG_PAIR(r1); + temp = tcg_temp_new(tcg_ctx); + tcg_gen_addi_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_a[r2], off10); + gen_st_2regs_64(ctx, tcg_ctx->cpu_gpr_a[r1+1], tcg_ctx->cpu_gpr_a[r1], temp); + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r2], temp); + tcg_temp_free(tcg_ctx, temp); + break; + case OPC2_32_BO_ST_H_SHORTOFF: + gen_offset_st(ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[r2], off10, MO_LEUW); + break; + case OPC2_32_BO_ST_H_POSTINC: + tcg_gen_qemu_st_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[r2], ctx->mem_idx, + MO_LEUW); + tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r2], tcg_ctx->cpu_gpr_a[r2], off10); + break; + case OPC2_32_BO_ST_H_PREINC: + gen_st_preincr(ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[r2], off10, MO_LEUW); + break; + case OPC2_32_BO_ST_Q_SHORTOFF: + temp = tcg_temp_new(tcg_ctx); + tcg_gen_shri_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r1], 16); + gen_offset_st(ctx, temp, tcg_ctx->cpu_gpr_a[r2], off10, MO_LEUW); + tcg_temp_free(tcg_ctx, temp); + break; + case OPC2_32_BO_ST_Q_POSTINC: + temp = tcg_temp_new(tcg_ctx); + tcg_gen_shri_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r1], 16); + tcg_gen_qemu_st_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_a[r2], ctx->mem_idx, + MO_LEUW); + tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r2], tcg_ctx->cpu_gpr_a[r2], off10); + tcg_temp_free(tcg_ctx, temp); + break; + case OPC2_32_BO_ST_Q_PREINC: + temp = tcg_temp_new(tcg_ctx); + tcg_gen_shri_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r1], 16); + gen_st_preincr(ctx, temp, tcg_ctx->cpu_gpr_a[r2], off10, MO_LEUW); + tcg_temp_free(tcg_ctx, temp); + break; + case OPC2_32_BO_ST_W_SHORTOFF: + gen_offset_st(ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[r2], off10, MO_LEUL); + break; + case OPC2_32_BO_ST_W_POSTINC: + tcg_gen_qemu_st_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[r2], ctx->mem_idx, + MO_LEUL); + tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r2], tcg_ctx->cpu_gpr_a[r2], 
off10); + break; + case OPC2_32_BO_ST_W_PREINC: + gen_st_preincr(ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[r2], off10, MO_LEUL); + break; + default: + generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); + } +} + +static void decode_bo_addrmode_bitreverse_circular(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + uint32_t op2; + uint32_t off10; + int32_t r1, r2; + TCGv temp, temp2, temp3; + + r1 = MASK_OP_BO_S1D(ctx->opcode); + r2 = MASK_OP_BO_S2(ctx->opcode); + off10 = MASK_OP_BO_OFF10_SEXT(ctx->opcode); + op2 = MASK_OP_BO_OP2(ctx->opcode); + + temp = tcg_temp_new(tcg_ctx); + temp2 = tcg_temp_new(tcg_ctx); + temp3 = tcg_const_i32(tcg_ctx, off10); + CHECK_REG_PAIR(r2); + tcg_gen_ext16u_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_a[r2+1]); + tcg_gen_add_tl(tcg_ctx, temp2, tcg_ctx->cpu_gpr_a[r2], temp); + + switch (op2) { + case OPC2_32_BO_CACHEA_WI_BR: + case OPC2_32_BO_CACHEA_W_BR: + case OPC2_32_BO_CACHEA_I_BR: + gen_helper_br_update(tcg_ctx, tcg_ctx->cpu_gpr_a[r2+1], tcg_ctx->cpu_gpr_a[r2+1]); + break; + case OPC2_32_BO_CACHEA_WI_CIRC: + case OPC2_32_BO_CACHEA_W_CIRC: + case OPC2_32_BO_CACHEA_I_CIRC: + gen_helper_circ_update(tcg_ctx, tcg_ctx->cpu_gpr_a[r2+1], tcg_ctx->cpu_gpr_a[r2+1], temp3); + break; + case OPC2_32_BO_ST_A_BR: + tcg_gen_qemu_st_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL); + gen_helper_br_update(tcg_ctx, tcg_ctx->cpu_gpr_a[r2+1], tcg_ctx->cpu_gpr_a[r2+1]); + break; + case OPC2_32_BO_ST_A_CIRC: + tcg_gen_qemu_st_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL); + gen_helper_circ_update(tcg_ctx, tcg_ctx->cpu_gpr_a[r2+1], tcg_ctx->cpu_gpr_a[r2+1], temp3); + break; + case OPC2_32_BO_ST_B_BR: + tcg_gen_qemu_st_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_UB); + gen_helper_br_update(tcg_ctx, tcg_ctx->cpu_gpr_a[r2+1], tcg_ctx->cpu_gpr_a[r2+1]); + break; + case OPC2_32_BO_ST_B_CIRC: + tcg_gen_qemu_st_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_UB); + 
gen_helper_circ_update(tcg_ctx, tcg_ctx->cpu_gpr_a[r2+1], tcg_ctx->cpu_gpr_a[r2+1], temp3); + break; + case OPC2_32_BO_ST_D_BR: + CHECK_REG_PAIR(r1); + gen_st_2regs_64(ctx, tcg_ctx->cpu_gpr_d[r1+1], tcg_ctx->cpu_gpr_d[r1], temp2); + gen_helper_br_update(tcg_ctx, tcg_ctx->cpu_gpr_a[r2+1], tcg_ctx->cpu_gpr_a[r2+1]); + break; + case OPC2_32_BO_ST_D_CIRC: + CHECK_REG_PAIR(r1); + tcg_gen_qemu_st_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL); + tcg_gen_shri_tl(tcg_ctx, temp2, tcg_ctx->cpu_gpr_a[r2+1], 16); + tcg_gen_addi_tl(tcg_ctx, temp, temp, 4); + tcg_gen_rem_tl(tcg_ctx, temp, temp, temp2); + tcg_gen_add_tl(tcg_ctx, temp2, tcg_ctx->cpu_gpr_a[r2], temp); + tcg_gen_qemu_st_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1+1], temp2, ctx->mem_idx, MO_LEUL); + gen_helper_circ_update(tcg_ctx, tcg_ctx->cpu_gpr_a[r2+1], tcg_ctx->cpu_gpr_a[r2+1], temp3); + break; + case OPC2_32_BO_ST_DA_BR: + CHECK_REG_PAIR(r1); + gen_st_2regs_64(ctx, tcg_ctx->cpu_gpr_a[r1+1], tcg_ctx->cpu_gpr_a[r1], temp2); + gen_helper_br_update(tcg_ctx, tcg_ctx->cpu_gpr_a[r2+1], tcg_ctx->cpu_gpr_a[r2+1]); + break; + case OPC2_32_BO_ST_DA_CIRC: + CHECK_REG_PAIR(r1); + tcg_gen_qemu_st_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL); + tcg_gen_shri_tl(tcg_ctx, temp2, tcg_ctx->cpu_gpr_a[r2+1], 16); + tcg_gen_addi_tl(tcg_ctx, temp, temp, 4); + tcg_gen_rem_tl(tcg_ctx, temp, temp, temp2); + tcg_gen_add_tl(tcg_ctx, temp2, tcg_ctx->cpu_gpr_a[r2], temp); + tcg_gen_qemu_st_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r1+1], temp2, ctx->mem_idx, MO_LEUL); + gen_helper_circ_update(tcg_ctx, tcg_ctx->cpu_gpr_a[r2+1], tcg_ctx->cpu_gpr_a[r2+1], temp3); + break; + case OPC2_32_BO_ST_H_BR: + tcg_gen_qemu_st_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW); + gen_helper_br_update(tcg_ctx, tcg_ctx->cpu_gpr_a[r2+1], tcg_ctx->cpu_gpr_a[r2+1]); + break; + case OPC2_32_BO_ST_H_CIRC: + tcg_gen_qemu_st_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW); + gen_helper_circ_update(tcg_ctx, 
tcg_ctx->cpu_gpr_a[r2+1], tcg_ctx->cpu_gpr_a[r2+1], temp3); + break; + case OPC2_32_BO_ST_Q_BR: + tcg_gen_shri_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r1], 16); + tcg_gen_qemu_st_tl(tcg_ctx, temp, temp2, ctx->mem_idx, MO_LEUW); + gen_helper_br_update(tcg_ctx, tcg_ctx->cpu_gpr_a[r2+1], tcg_ctx->cpu_gpr_a[r2+1]); + break; + case OPC2_32_BO_ST_Q_CIRC: + tcg_gen_shri_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r1], 16); + tcg_gen_qemu_st_tl(tcg_ctx, temp, temp2, ctx->mem_idx, MO_LEUW); + gen_helper_circ_update(tcg_ctx, tcg_ctx->cpu_gpr_a[r2+1], tcg_ctx->cpu_gpr_a[r2+1], temp3); + break; + case OPC2_32_BO_ST_W_BR: + tcg_gen_qemu_st_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL); + gen_helper_br_update(tcg_ctx, tcg_ctx->cpu_gpr_a[r2+1], tcg_ctx->cpu_gpr_a[r2+1]); + break; + case OPC2_32_BO_ST_W_CIRC: + tcg_gen_qemu_st_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL); + gen_helper_circ_update(tcg_ctx, tcg_ctx->cpu_gpr_a[r2+1], tcg_ctx->cpu_gpr_a[r2+1], temp3); + break; + default: + generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); + } + tcg_temp_free(tcg_ctx, temp); + tcg_temp_free(tcg_ctx, temp2); + tcg_temp_free(tcg_ctx, temp3); +} + +static void decode_bo_addrmode_ld_post_pre_base(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + uint32_t op2; + uint32_t off10; + int32_t r1, r2; + TCGv temp; + + r1 = MASK_OP_BO_S1D(ctx->opcode); + r2 = MASK_OP_BO_S2(ctx->opcode); + off10 = MASK_OP_BO_OFF10_SEXT(ctx->opcode); + op2 = MASK_OP_BO_OP2(ctx->opcode); + + switch (op2) { + case OPC2_32_BO_LD_A_SHORTOFF: + gen_offset_ld(ctx, tcg_ctx->cpu_gpr_a[r1], tcg_ctx->cpu_gpr_a[r2], off10, MO_LEUL); + break; + case OPC2_32_BO_LD_A_POSTINC: + tcg_gen_qemu_ld_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r1], tcg_ctx->cpu_gpr_a[r2], ctx->mem_idx, + MO_LEUL); + tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r2], tcg_ctx->cpu_gpr_a[r2], off10); + break; + case OPC2_32_BO_LD_A_PREINC: + gen_ld_preincr(ctx, tcg_ctx->cpu_gpr_a[r1], tcg_ctx->cpu_gpr_a[r2], off10, MO_LEUL); 
+ break; + case OPC2_32_BO_LD_B_SHORTOFF: + gen_offset_ld(ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[r2], off10, MO_SB); + break; + case OPC2_32_BO_LD_B_POSTINC: + tcg_gen_qemu_ld_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[r2], ctx->mem_idx, + MO_SB); + tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r2], tcg_ctx->cpu_gpr_a[r2], off10); + break; + case OPC2_32_BO_LD_B_PREINC: + gen_ld_preincr(ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[r2], off10, MO_SB); + break; + case OPC2_32_BO_LD_BU_SHORTOFF: + gen_offset_ld(ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[r2], off10, MO_UB); + break; + case OPC2_32_BO_LD_BU_POSTINC: + tcg_gen_qemu_ld_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[r2], ctx->mem_idx, + MO_UB); + tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r2], tcg_ctx->cpu_gpr_a[r2], off10); + break; + case OPC2_32_BO_LD_BU_PREINC: + gen_ld_preincr(ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[r2], off10, MO_SB); + break; + case OPC2_32_BO_LD_D_SHORTOFF: + CHECK_REG_PAIR(r1); + gen_offset_ld_2regs(ctx, tcg_ctx->cpu_gpr_d[r1+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[r2], + off10); + break; + case OPC2_32_BO_LD_D_POSTINC: + CHECK_REG_PAIR(r1); + gen_ld_2regs_64(ctx, tcg_ctx->cpu_gpr_d[r1+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[r2]); + tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r2], tcg_ctx->cpu_gpr_a[r2], off10); + break; + case OPC2_32_BO_LD_D_PREINC: + CHECK_REG_PAIR(r1); + temp = tcg_temp_new(tcg_ctx); + tcg_gen_addi_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_a[r2], off10); + gen_ld_2regs_64(ctx, tcg_ctx->cpu_gpr_d[r1+1], tcg_ctx->cpu_gpr_d[r1], temp); + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r2], temp); + tcg_temp_free(tcg_ctx, temp); + break; + case OPC2_32_BO_LD_DA_SHORTOFF: + CHECK_REG_PAIR(r1); + gen_offset_ld_2regs(ctx, tcg_ctx->cpu_gpr_a[r1+1], tcg_ctx->cpu_gpr_a[r1], tcg_ctx->cpu_gpr_a[r2], + off10); + break; + case OPC2_32_BO_LD_DA_POSTINC: + CHECK_REG_PAIR(r1); + gen_ld_2regs_64(ctx, tcg_ctx->cpu_gpr_a[r1+1], 
tcg_ctx->cpu_gpr_a[r1], tcg_ctx->cpu_gpr_a[r2]); + tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r2], tcg_ctx->cpu_gpr_a[r2], off10); + break; + case OPC2_32_BO_LD_DA_PREINC: + CHECK_REG_PAIR(r1); + temp = tcg_temp_new(tcg_ctx); + tcg_gen_addi_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_a[r2], off10); + gen_ld_2regs_64(ctx, tcg_ctx->cpu_gpr_a[r1+1], tcg_ctx->cpu_gpr_a[r1], temp); + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r2], temp); + tcg_temp_free(tcg_ctx, temp); + break; + case OPC2_32_BO_LD_H_SHORTOFF: + gen_offset_ld(ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[r2], off10, MO_LESW); + break; + case OPC2_32_BO_LD_H_POSTINC: + tcg_gen_qemu_ld_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[r2], ctx->mem_idx, + MO_LESW); + tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r2], tcg_ctx->cpu_gpr_a[r2], off10); + break; + case OPC2_32_BO_LD_H_PREINC: + gen_ld_preincr(ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[r2], off10, MO_LESW); + break; + case OPC2_32_BO_LD_HU_SHORTOFF: + gen_offset_ld(ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[r2], off10, MO_LEUW); + break; + case OPC2_32_BO_LD_HU_POSTINC: + tcg_gen_qemu_ld_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[r2], ctx->mem_idx, + MO_LEUW); + tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r2], tcg_ctx->cpu_gpr_a[r2], off10); + break; + case OPC2_32_BO_LD_HU_PREINC: + gen_ld_preincr(ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[r2], off10, MO_LEUW); + break; + case OPC2_32_BO_LD_Q_SHORTOFF: + gen_offset_ld(ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[r2], off10, MO_LEUW); + tcg_gen_shli_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r1], 16); + break; + case OPC2_32_BO_LD_Q_POSTINC: + tcg_gen_qemu_ld_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[r2], ctx->mem_idx, + MO_LEUW); + tcg_gen_shli_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r1], 16); + tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r2], tcg_ctx->cpu_gpr_a[r2], off10); + break; + case OPC2_32_BO_LD_Q_PREINC: + 
gen_ld_preincr(ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[r2], off10, MO_LEUW); + tcg_gen_shli_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r1], 16); + break; + case OPC2_32_BO_LD_W_SHORTOFF: + gen_offset_ld(ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[r2], off10, MO_LEUL); + break; + case OPC2_32_BO_LD_W_POSTINC: + tcg_gen_qemu_ld_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[r2], ctx->mem_idx, + MO_LEUL); + tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r2], tcg_ctx->cpu_gpr_a[r2], off10); + break; + case OPC2_32_BO_LD_W_PREINC: + gen_ld_preincr(ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[r2], off10, MO_LEUL); + break; + default: + generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); + } +} + +static void decode_bo_addrmode_ld_bitreverse_circular(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + uint32_t op2; + uint32_t off10; + int r1, r2; + + TCGv temp, temp2, temp3; + + r1 = MASK_OP_BO_S1D(ctx->opcode); + r2 = MASK_OP_BO_S2(ctx->opcode); + off10 = MASK_OP_BO_OFF10_SEXT(ctx->opcode); + op2 = MASK_OP_BO_OP2(ctx->opcode); + + temp = tcg_temp_new(tcg_ctx); + temp2 = tcg_temp_new(tcg_ctx); + temp3 = tcg_const_i32(tcg_ctx, off10); + CHECK_REG_PAIR(r2); + tcg_gen_ext16u_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_a[r2+1]); + tcg_gen_add_tl(tcg_ctx, temp2, tcg_ctx->cpu_gpr_a[r2], temp); + + + switch (op2) { + case OPC2_32_BO_LD_A_BR: + tcg_gen_qemu_ld_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL); + gen_helper_br_update(tcg_ctx, tcg_ctx->cpu_gpr_a[r2+1], tcg_ctx->cpu_gpr_a[r2+1]); + break; + case OPC2_32_BO_LD_A_CIRC: + tcg_gen_qemu_ld_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL); + gen_helper_circ_update(tcg_ctx, tcg_ctx->cpu_gpr_a[r2+1], tcg_ctx->cpu_gpr_a[r2+1], temp3); + break; + case OPC2_32_BO_LD_B_BR: + tcg_gen_qemu_ld_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_SB); + gen_helper_br_update(tcg_ctx, tcg_ctx->cpu_gpr_a[r2+1], tcg_ctx->cpu_gpr_a[r2+1]); + break; + case 
OPC2_32_BO_LD_B_CIRC: + tcg_gen_qemu_ld_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_SB); + gen_helper_circ_update(tcg_ctx, tcg_ctx->cpu_gpr_a[r2+1], tcg_ctx->cpu_gpr_a[r2+1], temp3); + break; + case OPC2_32_BO_LD_BU_BR: + tcg_gen_qemu_ld_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_UB); + gen_helper_br_update(tcg_ctx, tcg_ctx->cpu_gpr_a[r2+1], tcg_ctx->cpu_gpr_a[r2+1]); + break; + case OPC2_32_BO_LD_BU_CIRC: + tcg_gen_qemu_ld_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_UB); + gen_helper_circ_update(tcg_ctx, tcg_ctx->cpu_gpr_a[r2+1], tcg_ctx->cpu_gpr_a[r2+1], temp3); + break; + case OPC2_32_BO_LD_D_BR: + CHECK_REG_PAIR(r1); + gen_ld_2regs_64(ctx, tcg_ctx->cpu_gpr_d[r1+1], tcg_ctx->cpu_gpr_d[r1], temp2); + gen_helper_br_update(tcg_ctx, tcg_ctx->cpu_gpr_a[r2+1], tcg_ctx->cpu_gpr_a[r2+1]); + break; + case OPC2_32_BO_LD_D_CIRC: + CHECK_REG_PAIR(r1); + tcg_gen_qemu_ld_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL); + tcg_gen_shri_tl(tcg_ctx, temp2, tcg_ctx->cpu_gpr_a[r2+1], 16); + tcg_gen_addi_tl(tcg_ctx, temp, temp, 4); + tcg_gen_rem_tl(tcg_ctx, temp, temp, temp2); + tcg_gen_add_tl(tcg_ctx, temp2, tcg_ctx->cpu_gpr_a[r2], temp); + tcg_gen_qemu_ld_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1+1], temp2, ctx->mem_idx, MO_LEUL); + gen_helper_circ_update(tcg_ctx, tcg_ctx->cpu_gpr_a[r2+1], tcg_ctx->cpu_gpr_a[r2+1], temp3); + break; + case OPC2_32_BO_LD_DA_BR: + CHECK_REG_PAIR(r1); + gen_ld_2regs_64(ctx, tcg_ctx->cpu_gpr_a[r1+1], tcg_ctx->cpu_gpr_a[r1], temp2); + gen_helper_br_update(tcg_ctx, tcg_ctx->cpu_gpr_a[r2+1], tcg_ctx->cpu_gpr_a[r2+1]); + break; + case OPC2_32_BO_LD_DA_CIRC: + CHECK_REG_PAIR(r1); + tcg_gen_qemu_ld_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL); + tcg_gen_shri_tl(tcg_ctx, temp2, tcg_ctx->cpu_gpr_a[r2+1], 16); + tcg_gen_addi_tl(tcg_ctx, temp, temp, 4); + tcg_gen_rem_tl(tcg_ctx, temp, temp, temp2); + tcg_gen_add_tl(tcg_ctx, temp2, tcg_ctx->cpu_gpr_a[r2], temp); + 
tcg_gen_qemu_ld_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r1+1], temp2, ctx->mem_idx, MO_LEUL); + gen_helper_circ_update(tcg_ctx, tcg_ctx->cpu_gpr_a[r2+1], tcg_ctx->cpu_gpr_a[r2+1], temp3); + break; + case OPC2_32_BO_LD_H_BR: + tcg_gen_qemu_ld_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LESW); + gen_helper_br_update(tcg_ctx, tcg_ctx->cpu_gpr_a[r2+1], tcg_ctx->cpu_gpr_a[r2+1]); + break; + case OPC2_32_BO_LD_H_CIRC: + tcg_gen_qemu_ld_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LESW); + gen_helper_circ_update(tcg_ctx, tcg_ctx->cpu_gpr_a[r2+1], tcg_ctx->cpu_gpr_a[r2+1], temp3); + break; + case OPC2_32_BO_LD_HU_BR: + tcg_gen_qemu_ld_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW); + gen_helper_br_update(tcg_ctx, tcg_ctx->cpu_gpr_a[r2+1], tcg_ctx->cpu_gpr_a[r2+1]); + break; + case OPC2_32_BO_LD_HU_CIRC: + tcg_gen_qemu_ld_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW); + gen_helper_circ_update(tcg_ctx, tcg_ctx->cpu_gpr_a[r2+1], tcg_ctx->cpu_gpr_a[r2+1], temp3); + break; + case OPC2_32_BO_LD_Q_BR: + tcg_gen_qemu_ld_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW); + tcg_gen_shli_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r1], 16); + gen_helper_br_update(tcg_ctx, tcg_ctx->cpu_gpr_a[r2+1], tcg_ctx->cpu_gpr_a[r2+1]); + break; + case OPC2_32_BO_LD_Q_CIRC: + tcg_gen_qemu_ld_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW); + tcg_gen_shli_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r1], 16); + gen_helper_circ_update(tcg_ctx, tcg_ctx->cpu_gpr_a[r2+1], tcg_ctx->cpu_gpr_a[r2+1], temp3); + break; + case OPC2_32_BO_LD_W_BR: + tcg_gen_qemu_ld_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL); + gen_helper_br_update(tcg_ctx, tcg_ctx->cpu_gpr_a[r2+1], tcg_ctx->cpu_gpr_a[r2+1]); + break; + case OPC2_32_BO_LD_W_CIRC: + tcg_gen_qemu_ld_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL); + gen_helper_circ_update(tcg_ctx, tcg_ctx->cpu_gpr_a[r2+1], 
tcg_ctx->cpu_gpr_a[r2+1], temp3); + break; + default: + generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); + } + tcg_temp_free(tcg_ctx, temp); + tcg_temp_free(tcg_ctx, temp2); + tcg_temp_free(tcg_ctx, temp3); +} + +static void decode_bo_addrmode_stctx_post_pre_base(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + uint32_t op2; + uint32_t off10; + int r1, r2; + + TCGv temp, temp2; + + r1 = MASK_OP_BO_S1D(ctx->opcode); + r2 = MASK_OP_BO_S2(ctx->opcode); + off10 = MASK_OP_BO_OFF10_SEXT(ctx->opcode); + op2 = MASK_OP_BO_OP2(ctx->opcode); + + + temp = tcg_temp_new(tcg_ctx); + temp2 = tcg_temp_new(tcg_ctx); + + switch (op2) { + case OPC2_32_BO_LDLCX_SHORTOFF: + tcg_gen_addi_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_a[r2], off10); + gen_helper_ldlcx(tcg_ctx, tcg_ctx->cpu_env, temp); + break; + case OPC2_32_BO_LDMST_SHORTOFF: + tcg_gen_addi_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_a[r2], off10); + gen_ldmst(ctx, r1, temp); + break; + case OPC2_32_BO_LDMST_POSTINC: + gen_ldmst(ctx, r1, tcg_ctx->cpu_gpr_a[r2]); + tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r2], tcg_ctx->cpu_gpr_a[r2], off10); + break; + case OPC2_32_BO_LDMST_PREINC: + tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r2], tcg_ctx->cpu_gpr_a[r2], off10); + gen_ldmst(ctx, r1, tcg_ctx->cpu_gpr_a[r2]); + break; + case OPC2_32_BO_LDUCX_SHORTOFF: + tcg_gen_addi_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_a[r2], off10); + gen_helper_lducx(tcg_ctx, tcg_ctx->cpu_env, temp); + break; + case OPC2_32_BO_LEA_SHORTOFF: + tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r1], tcg_ctx->cpu_gpr_a[r2], off10); + break; + case OPC2_32_BO_STLCX_SHORTOFF: + tcg_gen_addi_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_a[r2], off10); + gen_helper_stlcx(tcg_ctx, tcg_ctx->cpu_env, temp); + break; + case OPC2_32_BO_STUCX_SHORTOFF: + tcg_gen_addi_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_a[r2], off10); + gen_helper_stucx(tcg_ctx, tcg_ctx->cpu_env, temp); + break; + case OPC2_32_BO_SWAP_W_SHORTOFF: + tcg_gen_addi_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_a[r2], off10); + 
gen_swap(ctx, r1, temp); + break; + case OPC2_32_BO_SWAP_W_POSTINC: + gen_swap(ctx, r1, tcg_ctx->cpu_gpr_a[r2]); + tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r2], tcg_ctx->cpu_gpr_a[r2], off10); + break; + case OPC2_32_BO_SWAP_W_PREINC: + tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r2], tcg_ctx->cpu_gpr_a[r2], off10); + gen_swap(ctx, r1, tcg_ctx->cpu_gpr_a[r2]); + break; + case OPC2_32_BO_CMPSWAP_W_SHORTOFF: + tcg_gen_addi_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_a[r2], off10); + gen_cmpswap(ctx, r1, temp); + break; + case OPC2_32_BO_CMPSWAP_W_POSTINC: + gen_cmpswap(ctx, r1, tcg_ctx->cpu_gpr_a[r2]); + tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r2], tcg_ctx->cpu_gpr_a[r2], off10); + break; + case OPC2_32_BO_CMPSWAP_W_PREINC: + tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r2], tcg_ctx->cpu_gpr_a[r2], off10); + gen_cmpswap(ctx, r1, tcg_ctx->cpu_gpr_a[r2]); + break; + case OPC2_32_BO_SWAPMSK_W_SHORTOFF: + tcg_gen_addi_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_a[r2], off10); + gen_swapmsk(ctx, r1, temp); + break; + case OPC2_32_BO_SWAPMSK_W_POSTINC: + gen_swapmsk(ctx, r1, tcg_ctx->cpu_gpr_a[r2]); + tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r2], tcg_ctx->cpu_gpr_a[r2], off10); + break; + case OPC2_32_BO_SWAPMSK_W_PREINC: + tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r2], tcg_ctx->cpu_gpr_a[r2], off10); + gen_swapmsk(ctx, r1, tcg_ctx->cpu_gpr_a[r2]); + break; + default: + generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); + } + tcg_temp_free(tcg_ctx, temp); + tcg_temp_free(tcg_ctx, temp2); +} + +static void decode_bo_addrmode_ldmst_bitreverse_circular(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + uint32_t op2; + uint32_t off10; + int r1, r2; + + TCGv temp, temp2, temp3; + + r1 = MASK_OP_BO_S1D(ctx->opcode); + r2 = MASK_OP_BO_S2(ctx->opcode); + off10 = MASK_OP_BO_OFF10_SEXT(ctx->opcode); + op2 = MASK_OP_BO_OP2(ctx->opcode); + + temp = tcg_temp_new(tcg_ctx); + temp2 = tcg_temp_new(tcg_ctx); + temp3 = tcg_const_i32(tcg_ctx, off10); + CHECK_REG_PAIR(r2); + 
tcg_gen_ext16u_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_a[r2+1]); + tcg_gen_add_tl(tcg_ctx, temp2, tcg_ctx->cpu_gpr_a[r2], temp); + + switch (op2) { + case OPC2_32_BO_LDMST_BR: + gen_ldmst(ctx, r1, temp2); + gen_helper_br_update(tcg_ctx, tcg_ctx->cpu_gpr_a[r2+1], tcg_ctx->cpu_gpr_a[r2+1]); + break; + case OPC2_32_BO_LDMST_CIRC: + gen_ldmst(ctx, r1, temp2); + gen_helper_circ_update(tcg_ctx, tcg_ctx->cpu_gpr_a[r2+1], tcg_ctx->cpu_gpr_a[r2+1], temp3); + break; + case OPC2_32_BO_SWAP_W_BR: + gen_swap(ctx, r1, temp2); + gen_helper_br_update(tcg_ctx, tcg_ctx->cpu_gpr_a[r2+1], tcg_ctx->cpu_gpr_a[r2+1]); + break; + case OPC2_32_BO_SWAP_W_CIRC: + gen_swap(ctx, r1, temp2); + gen_helper_circ_update(tcg_ctx, tcg_ctx->cpu_gpr_a[r2+1], tcg_ctx->cpu_gpr_a[r2+1], temp3); + break; + case OPC2_32_BO_CMPSWAP_W_BR: + gen_cmpswap(ctx, r1, temp2); + gen_helper_br_update(tcg_ctx, tcg_ctx->cpu_gpr_a[r2+1], tcg_ctx->cpu_gpr_a[r2+1]); + break; + case OPC2_32_BO_CMPSWAP_W_CIRC: + gen_cmpswap(ctx, r1, temp2); + gen_helper_circ_update(tcg_ctx, tcg_ctx->cpu_gpr_a[r2+1], tcg_ctx->cpu_gpr_a[r2+1], temp3); + break; + case OPC2_32_BO_SWAPMSK_W_BR: + gen_swapmsk(ctx, r1, temp2); + gen_helper_br_update(tcg_ctx, tcg_ctx->cpu_gpr_a[r2+1], tcg_ctx->cpu_gpr_a[r2+1]); + break; + case OPC2_32_BO_SWAPMSK_W_CIRC: + gen_swapmsk(ctx, r1, temp2); + gen_helper_circ_update(tcg_ctx, tcg_ctx->cpu_gpr_a[r2+1], tcg_ctx->cpu_gpr_a[r2+1], temp3); + break; + default: + generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); + } + + tcg_temp_free(tcg_ctx, temp); + tcg_temp_free(tcg_ctx, temp2); + tcg_temp_free(tcg_ctx, temp3); +} + +static void decode_bol_opc(DisasContext *ctx, int32_t op1) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + int r1, r2; + int32_t address; + TCGv temp; + + r1 = MASK_OP_BOL_S1D(ctx->opcode); + r2 = MASK_OP_BOL_S2(ctx->opcode); + address = MASK_OP_BOL_OFF16_SEXT(ctx->opcode); + + switch (op1) { + case OPC1_32_BOL_LD_A_LONGOFF: + temp = tcg_temp_new(tcg_ctx); + tcg_gen_addi_tl(tcg_ctx, temp, 
tcg_ctx->cpu_gpr_a[r2], address); + tcg_gen_qemu_ld_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r1], temp, ctx->mem_idx, MO_LEUL); + tcg_temp_free(tcg_ctx, temp); + break; + case OPC1_32_BOL_LD_W_LONGOFF: + temp = tcg_temp_new(tcg_ctx); + tcg_gen_addi_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_a[r2], address); + tcg_gen_qemu_ld_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LEUL); + tcg_temp_free(tcg_ctx, temp); + break; + case OPC1_32_BOL_LEA_LONGOFF: + tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r1], tcg_ctx->cpu_gpr_a[r2], address); + break; + case OPC1_32_BOL_ST_A_LONGOFF: + if (has_feature(ctx, TRICORE_FEATURE_16)) { + gen_offset_st(ctx, tcg_ctx->cpu_gpr_a[r1], tcg_ctx->cpu_gpr_a[r2], address, MO_LEUL); + } else { + generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); + } + break; + case OPC1_32_BOL_ST_W_LONGOFF: + gen_offset_st(ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[r2], address, MO_LEUL); + break; + case OPC1_32_BOL_LD_B_LONGOFF: + if (has_feature(ctx, TRICORE_FEATURE_16)) { + gen_offset_ld(ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[r2], address, MO_SB); + } else { + generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); + } + break; + case OPC1_32_BOL_LD_BU_LONGOFF: + if (has_feature(ctx, TRICORE_FEATURE_16)) { + gen_offset_ld(ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[r2], address, MO_UB); + } else { + generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); + } + break; + case OPC1_32_BOL_LD_H_LONGOFF: + if (has_feature(ctx, TRICORE_FEATURE_16)) { + gen_offset_ld(ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[r2], address, MO_LESW); + } else { + generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); + } + break; + case OPC1_32_BOL_LD_HU_LONGOFF: + if (has_feature(ctx, TRICORE_FEATURE_16)) { + gen_offset_ld(ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[r2], address, MO_LEUW); + } else { + generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); + } + break; + case OPC1_32_BOL_ST_B_LONGOFF: + if (has_feature(ctx, TRICORE_FEATURE_16)) { + gen_offset_st(ctx, tcg_ctx->cpu_gpr_d[r1], 
tcg_ctx->cpu_gpr_a[r2], address, MO_SB); + } else { + generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); + } + break; + case OPC1_32_BOL_ST_H_LONGOFF: + if (has_feature(ctx, TRICORE_FEATURE_16)) { + gen_offset_st(ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_a[r2], address, MO_LESW); + } else { + generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); + } + break; + default: + generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); + } +} + +/* RC format */ +static void decode_rc_logical_shift(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + uint32_t op2; + int r1, r2; + int32_t const9; + TCGv temp; + + r2 = MASK_OP_RC_D(ctx->opcode); + r1 = MASK_OP_RC_S1(ctx->opcode); + const9 = MASK_OP_RC_CONST9(ctx->opcode); + op2 = MASK_OP_RC_OP2(ctx->opcode); + + temp = tcg_temp_new(tcg_ctx); + + switch (op2) { + case OPC2_32_RC_AND: + tcg_gen_andi_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], const9); + break; + case OPC2_32_RC_ANDN: + tcg_gen_andi_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], ~const9); + break; + case OPC2_32_RC_NAND: + tcg_gen_movi_tl(tcg_ctx, temp, const9); + tcg_gen_nand_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], temp); + break; + case OPC2_32_RC_NOR: + tcg_gen_movi_tl(tcg_ctx, temp, const9); + tcg_gen_nor_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], temp); + break; + case OPC2_32_RC_OR: + tcg_gen_ori_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], const9); + break; + case OPC2_32_RC_ORN: + tcg_gen_ori_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], ~const9); + break; + case OPC2_32_RC_SH: + const9 = sextract32(const9, 0, 6); + gen_shi(ctx, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], const9); + break; + case OPC2_32_RC_SH_H: + const9 = sextract32(const9, 0, 5); + gen_sh_hi(ctx, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], const9); + break; + case OPC2_32_RC_SHA: + const9 = sextract32(const9, 0, 6); + gen_shaci(ctx, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], 
const9); + break; + case OPC2_32_RC_SHA_H: + const9 = sextract32(const9, 0, 5); + gen_sha_hi(ctx, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], const9); + break; + case OPC2_32_RC_SHAS: + gen_shasi(ctx, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], const9); + break; + case OPC2_32_RC_XNOR: + tcg_gen_xori_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], const9); + tcg_gen_not_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RC_XOR: + tcg_gen_xori_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], const9); + break; + default: + generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); + } + tcg_temp_free(tcg_ctx, temp); +} + +static void decode_rc_accumulator(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + uint32_t op2; + int r1, r2; + int16_t const9; + + TCGv temp; + + r2 = MASK_OP_RC_D(ctx->opcode); + r1 = MASK_OP_RC_S1(ctx->opcode); + const9 = MASK_OP_RC_CONST9_SEXT(ctx->opcode); + + op2 = MASK_OP_RC_OP2(ctx->opcode); + + temp = tcg_temp_new(tcg_ctx); + + switch (op2) { + case OPC2_32_RC_ABSDIF: + gen_absdifi(ctx, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], const9); + break; + case OPC2_32_RC_ABSDIFS: + gen_absdifsi(ctx, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], const9); + break; + case OPC2_32_RC_ADD: + gen_addi_d(ctx, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], const9); + break; + case OPC2_32_RC_ADDC: + gen_addci_CC(ctx, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], const9); + break; + case OPC2_32_RC_ADDS: + gen_addsi(ctx, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], const9); + break; + case OPC2_32_RC_ADDS_U: + gen_addsui(ctx, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], const9); + break; + case OPC2_32_RC_ADDX: + gen_addi_CC(ctx, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], const9); + break; + case OPC2_32_RC_AND_EQ: + gen_accumulating_condi(ctx, TCG_COND_EQ, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], + const9, &tcg_gen_and_tl); + break; + case 
OPC2_32_RC_AND_GE: + gen_accumulating_condi(ctx, TCG_COND_GE, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], + const9, &tcg_gen_and_tl); + break; + case OPC2_32_RC_AND_GE_U: + const9 = MASK_OP_RC_CONST9(ctx->opcode); + gen_accumulating_condi(ctx, TCG_COND_GEU, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], + const9, &tcg_gen_and_tl); + break; + case OPC2_32_RC_AND_LT: + gen_accumulating_condi(ctx, TCG_COND_LT, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], + const9, &tcg_gen_and_tl); + break; + case OPC2_32_RC_AND_LT_U: + const9 = MASK_OP_RC_CONST9(ctx->opcode); + gen_accumulating_condi(ctx, TCG_COND_LTU, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], + const9, &tcg_gen_and_tl); + break; + case OPC2_32_RC_AND_NE: + gen_accumulating_condi(ctx, TCG_COND_NE, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], + const9, &tcg_gen_and_tl); + break; + case OPC2_32_RC_EQ: + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], const9); + break; + case OPC2_32_RC_EQANY_B: + gen_eqany_bi(ctx, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], const9); + break; + case OPC2_32_RC_EQANY_H: + gen_eqany_hi(ctx, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], const9); + break; + case OPC2_32_RC_GE: + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_GE, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], const9); + break; + case OPC2_32_RC_GE_U: + const9 = MASK_OP_RC_CONST9(ctx->opcode); + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_GEU, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], const9); + break; + case OPC2_32_RC_LT: + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_LT, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], const9); + break; + case OPC2_32_RC_LT_U: + const9 = MASK_OP_RC_CONST9(ctx->opcode); + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_LTU, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], const9); + break; + case OPC2_32_RC_MAX: + tcg_gen_movi_tl(tcg_ctx, temp, const9); + tcg_gen_movcond_tl(tcg_ctx, TCG_COND_GT, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], temp, + 
tcg_ctx->cpu_gpr_d[r1], temp); + break; + case OPC2_32_RC_MAX_U: + tcg_gen_movi_tl(tcg_ctx, temp, MASK_OP_RC_CONST9(ctx->opcode)); + tcg_gen_movcond_tl(tcg_ctx, TCG_COND_GTU, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], temp, + tcg_ctx->cpu_gpr_d[r1], temp); + break; + case OPC2_32_RC_MIN: + tcg_gen_movi_tl(tcg_ctx, temp, const9); + tcg_gen_movcond_tl(tcg_ctx, TCG_COND_LT, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], temp, + tcg_ctx->cpu_gpr_d[r1], temp); + break; + case OPC2_32_RC_MIN_U: + tcg_gen_movi_tl(tcg_ctx, temp, MASK_OP_RC_CONST9(ctx->opcode)); + tcg_gen_movcond_tl(tcg_ctx, TCG_COND_LTU, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], temp, + tcg_ctx->cpu_gpr_d[r1], temp); + break; + case OPC2_32_RC_NE: + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_NE, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], const9); + break; + case OPC2_32_RC_OR_EQ: + gen_accumulating_condi(ctx, TCG_COND_EQ, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], + const9, &tcg_gen_or_tl); + break; + case OPC2_32_RC_OR_GE: + gen_accumulating_condi(ctx, TCG_COND_GE, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], + const9, &tcg_gen_or_tl); + break; + case OPC2_32_RC_OR_GE_U: + const9 = MASK_OP_RC_CONST9(ctx->opcode); + gen_accumulating_condi(ctx, TCG_COND_GEU, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], + const9, &tcg_gen_or_tl); + break; + case OPC2_32_RC_OR_LT: + gen_accumulating_condi(ctx, TCG_COND_LT, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], + const9, &tcg_gen_or_tl); + break; + case OPC2_32_RC_OR_LT_U: + const9 = MASK_OP_RC_CONST9(ctx->opcode); + gen_accumulating_condi(ctx, TCG_COND_LTU, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], + const9, &tcg_gen_or_tl); + break; + case OPC2_32_RC_OR_NE: + gen_accumulating_condi(ctx, TCG_COND_NE, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], + const9, &tcg_gen_or_tl); + break; + case OPC2_32_RC_RSUB: + tcg_gen_movi_tl(tcg_ctx, temp, const9); + gen_sub_d(ctx, tcg_ctx->cpu_gpr_d[r2], temp, tcg_ctx->cpu_gpr_d[r1]); + break; + case 
OPC2_32_RC_RSUBS: + tcg_gen_movi_tl(tcg_ctx, temp, const9); + gen_subs(ctx, tcg_ctx->cpu_gpr_d[r2], temp, tcg_ctx->cpu_gpr_d[r1]); + break; + case OPC2_32_RC_RSUBS_U: + tcg_gen_movi_tl(tcg_ctx, temp, const9); + gen_subsu(ctx, tcg_ctx->cpu_gpr_d[r2], temp, tcg_ctx->cpu_gpr_d[r1]); + break; + case OPC2_32_RC_SH_EQ: + gen_sh_condi(ctx, TCG_COND_EQ, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], const9); + break; + case OPC2_32_RC_SH_GE: + gen_sh_condi(ctx, TCG_COND_GE, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], const9); + break; + case OPC2_32_RC_SH_GE_U: + const9 = MASK_OP_RC_CONST9(ctx->opcode); + gen_sh_condi(ctx, TCG_COND_GEU, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], const9); + break; + case OPC2_32_RC_SH_LT: + gen_sh_condi(ctx, TCG_COND_LT, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], const9); + break; + case OPC2_32_RC_SH_LT_U: + const9 = MASK_OP_RC_CONST9(ctx->opcode); + gen_sh_condi(ctx, TCG_COND_LTU, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], const9); + break; + case OPC2_32_RC_SH_NE: + gen_sh_condi(ctx, TCG_COND_NE, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], const9); + break; + case OPC2_32_RC_XOR_EQ: + gen_accumulating_condi(ctx, TCG_COND_EQ, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], + const9, &tcg_gen_xor_tl); + break; + case OPC2_32_RC_XOR_GE: + gen_accumulating_condi(ctx, TCG_COND_GE, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], + const9, &tcg_gen_xor_tl); + break; + case OPC2_32_RC_XOR_GE_U: + const9 = MASK_OP_RC_CONST9(ctx->opcode); + gen_accumulating_condi(ctx, TCG_COND_GEU, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], + const9, &tcg_gen_xor_tl); + break; + case OPC2_32_RC_XOR_LT: + gen_accumulating_condi(ctx, TCG_COND_LT, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], + const9, &tcg_gen_xor_tl); + break; + case OPC2_32_RC_XOR_LT_U: + const9 = MASK_OP_RC_CONST9(ctx->opcode); + gen_accumulating_condi(ctx, TCG_COND_LTU, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], + const9, &tcg_gen_xor_tl); + break; + case 
OPC2_32_RC_XOR_NE: + gen_accumulating_condi(ctx, TCG_COND_NE, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], + const9, &tcg_gen_xor_tl); + break; + default: + generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); + } + tcg_temp_free(tcg_ctx, temp); +} + +static void decode_rc_serviceroutine(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + uint32_t op2; + uint32_t const9; + + op2 = MASK_OP_RC_OP2(ctx->opcode); + const9 = MASK_OP_RC_CONST9(ctx->opcode); + + switch (op2) { + case OPC2_32_RC_BISR: + gen_helper_1arg(tcg_ctx, bisr, const9); + break; + case OPC2_32_RC_SYSCALL: + /* TODO: Add exception generation */ + break; + default: + generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); + } +} + +static void decode_rc_mul(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + uint32_t op2; + int r1, r2; + int16_t const9; + + r2 = MASK_OP_RC_D(ctx->opcode); + r1 = MASK_OP_RC_S1(ctx->opcode); + const9 = MASK_OP_RC_CONST9_SEXT(ctx->opcode); + + op2 = MASK_OP_RC_OP2(ctx->opcode); + + switch (op2) { + case OPC2_32_RC_MUL_32: + gen_muli_i32s(ctx, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], const9); + break; + case OPC2_32_RC_MUL_64: + CHECK_REG_PAIR(r2); + gen_muli_i64s(ctx, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r2+1], tcg_ctx->cpu_gpr_d[r1], const9); + break; + case OPC2_32_RC_MULS_32: + gen_mulsi_i32(ctx, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], const9); + break; + case OPC2_32_RC_MUL_U_64: + const9 = MASK_OP_RC_CONST9(ctx->opcode); + CHECK_REG_PAIR(r2); + gen_muli_i64u(ctx, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r2+1], tcg_ctx->cpu_gpr_d[r1], const9); + break; + case OPC2_32_RC_MULS_U_32: + const9 = MASK_OP_RC_CONST9(ctx->opcode); + gen_mulsui_i32(ctx, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], const9); + break; + default: + generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); + } +} + +/* RCPW format */ +static void decode_rcpw_insert(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + uint32_t op2; + int r1, r2; + 
int32_t pos, width, const4; + + TCGv temp; + + op2 = MASK_OP_RCPW_OP2(ctx->opcode); + r1 = MASK_OP_RCPW_S1(ctx->opcode); + r2 = MASK_OP_RCPW_D(ctx->opcode); + const4 = MASK_OP_RCPW_CONST4(ctx->opcode); + width = MASK_OP_RCPW_WIDTH(ctx->opcode); + pos = MASK_OP_RCPW_POS(ctx->opcode); + + switch (op2) { + case OPC2_32_RCPW_IMASK: + CHECK_REG_PAIR(r2); + /* if pos + width > 32 undefined result */ + if (pos + width <= 32) { + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r2+1], ((1u << width) - 1) << pos); + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r2], (const4 << pos)); + } + break; + case OPC2_32_RCPW_INSERT: + /* if pos + width > 32 undefined result */ + if (pos + width <= 32) { + temp = tcg_const_i32(tcg_ctx, const4); + tcg_gen_deposit_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], temp, pos, width); + tcg_temp_free(tcg_ctx, temp); + } + break; + default: + generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); + } +} + +/* RCRW format */ + +static void decode_rcrw_insert(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + uint32_t op2; + int r1, r3, r4; + int32_t width, const4; + + TCGv temp, temp2, temp3; + + op2 = MASK_OP_RCRW_OP2(ctx->opcode); + r1 = MASK_OP_RCRW_S1(ctx->opcode); + r3 = MASK_OP_RCRW_S3(ctx->opcode); + r4 = MASK_OP_RCRW_D(ctx->opcode); + width = MASK_OP_RCRW_WIDTH(ctx->opcode); + const4 = MASK_OP_RCRW_CONST4(ctx->opcode); + + temp = tcg_temp_new(tcg_ctx); + temp2 = tcg_temp_new(tcg_ctx); + + switch (op2) { + case OPC2_32_RCRW_IMASK: + tcg_gen_andi_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r4], 0x1f); + tcg_gen_movi_tl(tcg_ctx, temp2, (1 << width) - 1); + tcg_gen_shl_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r3 + 1], temp2, temp); + tcg_gen_movi_tl(tcg_ctx, temp2, const4); + tcg_gen_shl_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], temp2, temp); + break; + case OPC2_32_RCRW_INSERT: + temp3 = tcg_temp_new(tcg_ctx); + + tcg_gen_movi_tl(tcg_ctx, temp, width); + tcg_gen_movi_tl(tcg_ctx, temp2, const4); + tcg_gen_andi_tl(tcg_ctx, temp3, 
tcg_ctx->cpu_gpr_d[r4], 0x1f); + gen_insert(ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], temp2, temp, temp3); + + tcg_temp_free(tcg_ctx, temp3); + break; + default: + generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); + } + tcg_temp_free(tcg_ctx, temp); + tcg_temp_free(tcg_ctx, temp2); +} + +/* RCR format */ + +static void decode_rcr_cond_select(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + uint32_t op2; + int r1, r3, r4; + int32_t const9; + + TCGv temp, temp2; + + op2 = MASK_OP_RCR_OP2(ctx->opcode); + r1 = MASK_OP_RCR_S1(ctx->opcode); + const9 = MASK_OP_RCR_CONST9_SEXT(ctx->opcode); + r3 = MASK_OP_RCR_S3(ctx->opcode); + r4 = MASK_OP_RCR_D(ctx->opcode); + + switch (op2) { + case OPC2_32_RCR_CADD: + gen_condi_add(ctx, TCG_COND_NE, tcg_ctx->cpu_gpr_d[r1], const9, tcg_ctx->cpu_gpr_d[r4], + tcg_ctx->cpu_gpr_d[r3]); + break; + case OPC2_32_RCR_CADDN: + gen_condi_add(ctx, TCG_COND_EQ, tcg_ctx->cpu_gpr_d[r1], const9, tcg_ctx->cpu_gpr_d[r4], + tcg_ctx->cpu_gpr_d[r3]); + break; + case OPC2_32_RCR_SEL: + temp = tcg_const_i32(tcg_ctx, 0); + temp2 = tcg_const_i32(tcg_ctx, const9); + tcg_gen_movcond_tl(tcg_ctx, TCG_COND_NE, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], temp, + tcg_ctx->cpu_gpr_d[r1], temp2); + tcg_temp_free(tcg_ctx, temp); + tcg_temp_free(tcg_ctx, temp2); + break; + case OPC2_32_RCR_SELN: + temp = tcg_const_i32(tcg_ctx, 0); + temp2 = tcg_const_i32(tcg_ctx, const9); + tcg_gen_movcond_tl(tcg_ctx, TCG_COND_EQ, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], temp, + tcg_ctx->cpu_gpr_d[r1], temp2); + tcg_temp_free(tcg_ctx, temp); + tcg_temp_free(tcg_ctx, temp2); + break; + default: + generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); + } +} + +static void decode_rcr_madd(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + uint32_t op2; + int r1, r3, r4; + int32_t const9; + + + op2 = MASK_OP_RCR_OP2(ctx->opcode); + r1 = MASK_OP_RCR_S1(ctx->opcode); + const9 = MASK_OP_RCR_CONST9_SEXT(ctx->opcode); + r3 = 
MASK_OP_RCR_S3(ctx->opcode); + r4 = MASK_OP_RCR_D(ctx->opcode); + + switch (op2) { + case OPC2_32_RCR_MADD_32: + gen_maddi32_d(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r3], const9); + break; + case OPC2_32_RCR_MADD_64: + CHECK_REG_PAIR(r4); + CHECK_REG_PAIR(r3); + gen_maddi64_d(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r1], + tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], const9); + break; + case OPC2_32_RCR_MADDS_32: + gen_maddsi_32(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r3], const9); + break; + case OPC2_32_RCR_MADDS_64: + CHECK_REG_PAIR(r4); + CHECK_REG_PAIR(r3); + gen_maddsi_64(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r1], + tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], const9); + break; + case OPC2_32_RCR_MADD_U_64: + CHECK_REG_PAIR(r4); + CHECK_REG_PAIR(r3); + const9 = MASK_OP_RCR_CONST9(ctx->opcode); + gen_maddui64_d(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r1], + tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], const9); + break; + case OPC2_32_RCR_MADDS_U_32: + const9 = MASK_OP_RCR_CONST9(ctx->opcode); + gen_maddsui_32(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r3], const9); + break; + case OPC2_32_RCR_MADDS_U_64: + CHECK_REG_PAIR(r4); + CHECK_REG_PAIR(r3); + const9 = MASK_OP_RCR_CONST9(ctx->opcode); + gen_maddsui_64(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r1], + tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], const9); + break; + default: + generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); + } +} + +static void decode_rcr_msub(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + uint32_t op2; + int r1, r3, r4; + int32_t const9; + + + op2 = MASK_OP_RCR_OP2(ctx->opcode); + r1 = MASK_OP_RCR_S1(ctx->opcode); + const9 = MASK_OP_RCR_CONST9_SEXT(ctx->opcode); + r3 = MASK_OP_RCR_S3(ctx->opcode); + r4 = 
MASK_OP_RCR_D(ctx->opcode); + + switch (op2) { + case OPC2_32_RCR_MSUB_32: + gen_msubi32_d(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r3], const9); + break; + case OPC2_32_RCR_MSUB_64: + CHECK_REG_PAIR(r4); + CHECK_REG_PAIR(r3); + gen_msubi64_d(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r1], + tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], const9); + break; + case OPC2_32_RCR_MSUBS_32: + gen_msubsi_32(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r3], const9); + break; + case OPC2_32_RCR_MSUBS_64: + CHECK_REG_PAIR(r4); + CHECK_REG_PAIR(r3); + gen_msubsi_64(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r1], + tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], const9); + break; + case OPC2_32_RCR_MSUB_U_64: + CHECK_REG_PAIR(r4); + CHECK_REG_PAIR(r3); + const9 = MASK_OP_RCR_CONST9(ctx->opcode); + gen_msubui64_d(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r1], + tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], const9); + break; + case OPC2_32_RCR_MSUBS_U_32: + const9 = MASK_OP_RCR_CONST9(ctx->opcode); + gen_msubsui_32(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r3], const9); + break; + case OPC2_32_RCR_MSUBS_U_64: + CHECK_REG_PAIR(r4); + CHECK_REG_PAIR(r3); + const9 = MASK_OP_RCR_CONST9(ctx->opcode); + gen_msubsui_64(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r1], + tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], const9); + break; + default: + generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); + } +} + +/* RLC format */ + +static void decode_rlc_opc(DisasContext *ctx, + uint32_t op1) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + int32_t const16; + int r1, r2; + + const16 = MASK_OP_RLC_CONST16_SEXT(ctx->opcode); + r1 = MASK_OP_RLC_S1(ctx->opcode); + r2 = MASK_OP_RLC_D(ctx->opcode); + + switch (op1) { + case OPC1_32_RLC_ADDI: + gen_addi_d(ctx, tcg_ctx->cpu_gpr_d[r2], 
tcg_ctx->cpu_gpr_d[r1], const16); + break; + case OPC1_32_RLC_ADDIH: + gen_addi_d(ctx, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], const16 << 16); + break; + case OPC1_32_RLC_ADDIH_A: + tcg_gen_addi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r2], tcg_ctx->cpu_gpr_a[r1], const16 << 16); + break; + case OPC1_32_RLC_MFCR: + const16 = MASK_OP_RLC_CONST16(ctx->opcode); + gen_mfcr(ctx, tcg_ctx->cpu_gpr_d[r2], const16); + break; + case OPC1_32_RLC_MOV: + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r2], const16); + break; + case OPC1_32_RLC_MOV_64: + if (has_feature(ctx, TRICORE_FEATURE_16)) { + CHECK_REG_PAIR(r2); + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r2], const16); + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r2+1], const16 >> 15); + } else { + generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); + } + break; + case OPC1_32_RLC_MOV_U: + const16 = MASK_OP_RLC_CONST16(ctx->opcode); + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r2], const16); + break; + case OPC1_32_RLC_MOV_H: + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r2], const16 << 16); + break; + case OPC1_32_RLC_MOVH_A: + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r2], const16 << 16); + break; + case OPC1_32_RLC_MTCR: + const16 = MASK_OP_RLC_CONST16(ctx->opcode); + gen_mtcr(ctx, tcg_ctx->cpu_gpr_d[r1], const16); + break; + default: + generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); + } +} + +/* RR format */ +static void decode_rr_accumulator(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + uint32_t op2; + int r3, r2, r1; + + TCGv temp; + + r3 = MASK_OP_RR_D(ctx->opcode); + r2 = MASK_OP_RR_S2(ctx->opcode); + r1 = MASK_OP_RR_S1(ctx->opcode); + op2 = MASK_OP_RR_OP2(ctx->opcode); + + switch (op2) { + case OPC2_32_RR_ABS: + gen_abs(ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RR_ABS_B: + gen_helper_abs_b(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_env, tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RR_ABS_H: + gen_helper_abs_h(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], 
tcg_ctx->cpu_env, tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RR_ABSDIF: + gen_absdif(ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RR_ABSDIF_B: + gen_helper_absdif_b(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_env, tcg_ctx->cpu_gpr_d[r1], + tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RR_ABSDIF_H: + gen_helper_absdif_h(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_env, tcg_ctx->cpu_gpr_d[r1], + tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RR_ABSDIFS: + gen_helper_absdif_ssov(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_env, tcg_ctx->cpu_gpr_d[r1], + tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RR_ABSDIFS_H: + gen_helper_absdif_h_ssov(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_env, tcg_ctx->cpu_gpr_d[r1], + tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RR_ABSS: + gen_helper_abs_ssov(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_env, tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RR_ABSS_H: + gen_helper_abs_h_ssov(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_env, tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RR_ADD: + gen_add_d(ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RR_ADD_B: + gen_helper_add_b(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_env, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RR_ADD_H: + gen_helper_add_h(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_env, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RR_ADDC: + gen_addc_CC(ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RR_ADDS: + gen_adds(ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RR_ADDS_H: + gen_helper_add_h_ssov(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_env, tcg_ctx->cpu_gpr_d[r1], + tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RR_ADDS_HU: + gen_helper_add_h_suov(tcg_ctx, 
tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_env, tcg_ctx->cpu_gpr_d[r1], + tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RR_ADDS_U: + gen_helper_add_suov(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_env, tcg_ctx->cpu_gpr_d[r1], + tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RR_ADDX: + gen_add_CC(ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RR_AND_EQ: + gen_accumulating_cond(ctx, TCG_COND_EQ, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], + tcg_ctx->cpu_gpr_d[r2], &tcg_gen_and_tl); + break; + case OPC2_32_RR_AND_GE: + gen_accumulating_cond(ctx, TCG_COND_GE, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], + tcg_ctx->cpu_gpr_d[r2], &tcg_gen_and_tl); + break; + case OPC2_32_RR_AND_GE_U: + gen_accumulating_cond(ctx, TCG_COND_GEU, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], + tcg_ctx->cpu_gpr_d[r2], &tcg_gen_and_tl); + break; + case OPC2_32_RR_AND_LT: + gen_accumulating_cond(ctx, TCG_COND_LT, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], + tcg_ctx->cpu_gpr_d[r2], &tcg_gen_and_tl); + break; + case OPC2_32_RR_AND_LT_U: + gen_accumulating_cond(ctx, TCG_COND_LTU, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], + tcg_ctx->cpu_gpr_d[r2], &tcg_gen_and_tl); + break; + case OPC2_32_RR_AND_NE: + gen_accumulating_cond(ctx, TCG_COND_NE, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], + tcg_ctx->cpu_gpr_d[r2], &tcg_gen_and_tl); + break; + case OPC2_32_RR_EQ: + tcg_gen_setcond_tl(tcg_ctx, TCG_COND_EQ, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], + tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RR_EQ_B: + gen_helper_eq_b(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RR_EQ_H: + gen_helper_eq_h(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RR_EQ_W: + gen_cond_w(ctx, TCG_COND_EQ, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RR_EQANY_B: + 
gen_helper_eqany_b(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RR_EQANY_H: + gen_helper_eqany_h(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RR_GE: + tcg_gen_setcond_tl(tcg_ctx, TCG_COND_GE, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], + tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RR_GE_U: + tcg_gen_setcond_tl(tcg_ctx, TCG_COND_GEU, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], + tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RR_LT: + tcg_gen_setcond_tl(tcg_ctx, TCG_COND_LT, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], + tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RR_LT_U: + tcg_gen_setcond_tl(tcg_ctx, TCG_COND_LTU, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], + tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RR_LT_B: + gen_helper_lt_b(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RR_LT_BU: + gen_helper_lt_bu(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RR_LT_H: + gen_helper_lt_h(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RR_LT_HU: + gen_helper_lt_hu(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RR_LT_W: + gen_cond_w(ctx, TCG_COND_LT, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RR_LT_WU: + gen_cond_w(ctx, TCG_COND_LTU, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RR_MAX: + tcg_gen_movcond_tl(tcg_ctx, TCG_COND_GT, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], + tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RR_MAX_U: + tcg_gen_movcond_tl(tcg_ctx, TCG_COND_GTU, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], + 
tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RR_MAX_B: + gen_helper_max_b(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RR_MAX_BU: + gen_helper_max_bu(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RR_MAX_H: + gen_helper_max_h(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RR_MAX_HU: + gen_helper_max_hu(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RR_MIN: + tcg_gen_movcond_tl(tcg_ctx, TCG_COND_LT, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], + tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RR_MIN_U: + tcg_gen_movcond_tl(tcg_ctx, TCG_COND_LTU, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], + tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RR_MIN_B: + gen_helper_min_b(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RR_MIN_BU: + gen_helper_min_bu(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RR_MIN_H: + gen_helper_min_h(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RR_MIN_HU: + gen_helper_min_hu(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RR_MOV: + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RR_MOV_64: + if (has_feature(ctx, TRICORE_FEATURE_16)) { + temp = tcg_temp_new(tcg_ctx); + + CHECK_REG_PAIR(r3); + tcg_gen_mov_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r1]); + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r2]); + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r3 + 1], temp); + 
+ tcg_temp_free(tcg_ctx, temp); + } else { + generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); + } + break; + case OPC2_32_RR_MOVS_64: + if (has_feature(ctx, TRICORE_FEATURE_16)) { + CHECK_REG_PAIR(r3); + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r2]); + tcg_gen_sari_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r3 + 1], tcg_ctx->cpu_gpr_d[r2], 31); + } else { + generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); + } + break; + case OPC2_32_RR_NE: + tcg_gen_setcond_tl(tcg_ctx, TCG_COND_NE, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], + tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RR_OR_EQ: + gen_accumulating_cond(ctx, TCG_COND_EQ, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], + tcg_ctx->cpu_gpr_d[r2], &tcg_gen_or_tl); + break; + case OPC2_32_RR_OR_GE: + gen_accumulating_cond(ctx, TCG_COND_GE, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], + tcg_ctx->cpu_gpr_d[r2], &tcg_gen_or_tl); + break; + case OPC2_32_RR_OR_GE_U: + gen_accumulating_cond(ctx, TCG_COND_GEU, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], + tcg_ctx->cpu_gpr_d[r2], &tcg_gen_or_tl); + break; + case OPC2_32_RR_OR_LT: + gen_accumulating_cond(ctx, TCG_COND_LT, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], + tcg_ctx->cpu_gpr_d[r2], &tcg_gen_or_tl); + break; + case OPC2_32_RR_OR_LT_U: + gen_accumulating_cond(ctx, TCG_COND_LTU, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], + tcg_ctx->cpu_gpr_d[r2], &tcg_gen_or_tl); + break; + case OPC2_32_RR_OR_NE: + gen_accumulating_cond(ctx, TCG_COND_NE, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], + tcg_ctx->cpu_gpr_d[r2], &tcg_gen_or_tl); + break; + case OPC2_32_RR_SAT_B: + gen_saturate(ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], 0x7f, -0x80); + break; + case OPC2_32_RR_SAT_BU: + gen_saturate_u(ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], 0xff); + break; + case OPC2_32_RR_SAT_H: + gen_saturate(ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], 0x7fff, -0x8000); + break; + case OPC2_32_RR_SAT_HU: + gen_saturate_u(ctx, 
tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], 0xffff); + break; + case OPC2_32_RR_SH_EQ: + gen_sh_cond(ctx, TCG_COND_EQ, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], + tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RR_SH_GE: + gen_sh_cond(ctx, TCG_COND_GE, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], + tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RR_SH_GE_U: + gen_sh_cond(ctx, TCG_COND_GEU, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], + tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RR_SH_LT: + gen_sh_cond(ctx, TCG_COND_LT, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], + tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RR_SH_LT_U: + gen_sh_cond(ctx, TCG_COND_LTU, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], + tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RR_SH_NE: + gen_sh_cond(ctx, TCG_COND_NE, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], + tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RR_SUB: + gen_sub_d(ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RR_SUB_B: + gen_helper_sub_b(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_env, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RR_SUB_H: + gen_helper_sub_h(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_env, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RR_SUBC: + gen_subc_CC(ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RR_SUBS: + gen_subs(ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RR_SUBS_U: + gen_subsu(ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RR_SUBS_H: + gen_helper_sub_h_ssov(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_env, tcg_ctx->cpu_gpr_d[r1], + tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RR_SUBS_HU: + gen_helper_sub_h_suov(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_env, 
tcg_ctx->cpu_gpr_d[r1], + tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RR_SUBX: + gen_sub_CC(ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RR_XOR_EQ: + gen_accumulating_cond(ctx, TCG_COND_EQ, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], + tcg_ctx->cpu_gpr_d[r2], &tcg_gen_xor_tl); + break; + case OPC2_32_RR_XOR_GE: + gen_accumulating_cond(ctx, TCG_COND_GE, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], + tcg_ctx->cpu_gpr_d[r2], &tcg_gen_xor_tl); + break; + case OPC2_32_RR_XOR_GE_U: + gen_accumulating_cond(ctx, TCG_COND_GEU, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], + tcg_ctx->cpu_gpr_d[r2], &tcg_gen_xor_tl); + break; + case OPC2_32_RR_XOR_LT: + gen_accumulating_cond(ctx, TCG_COND_LT, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], + tcg_ctx->cpu_gpr_d[r2], &tcg_gen_xor_tl); + break; + case OPC2_32_RR_XOR_LT_U: + gen_accumulating_cond(ctx, TCG_COND_LTU, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], + tcg_ctx->cpu_gpr_d[r2], &tcg_gen_xor_tl); + break; + case OPC2_32_RR_XOR_NE: + gen_accumulating_cond(ctx, TCG_COND_NE, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], + tcg_ctx->cpu_gpr_d[r2], &tcg_gen_xor_tl); + break; + default: + generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); + } +} + +static void decode_rr_logical_shift(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + uint32_t op2; + int r3, r2, r1; + TCGv temp; + + r3 = MASK_OP_RR_D(ctx->opcode); + r2 = MASK_OP_RR_S2(ctx->opcode); + r1 = MASK_OP_RR_S1(ctx->opcode); + + temp = tcg_temp_new(tcg_ctx); + op2 = MASK_OP_RR_OP2(ctx->opcode); + + switch (op2) { + case OPC2_32_RR_AND: + tcg_gen_and_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RR_ANDN: + tcg_gen_andc_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RR_CLO: + tcg_gen_not_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1]); + 
tcg_gen_clzi_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3], TARGET_LONG_BITS); + break; + case OPC2_32_RR_CLO_H: + gen_helper_clo_h(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1]); + break; + case OPC2_32_RR_CLS: + tcg_gen_clrsb_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1]); + break; + case OPC2_32_RR_CLS_H: + gen_helper_cls_h(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1]); + break; + case OPC2_32_RR_CLZ: + tcg_gen_clzi_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], TARGET_LONG_BITS); + break; + case OPC2_32_RR_CLZ_H: + gen_helper_clz_h(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1]); + break; + case OPC2_32_RR_NAND: + tcg_gen_nand_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RR_NOR: + tcg_gen_nor_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RR_OR: + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RR_ORN: + tcg_gen_orc_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RR_SH: + gen_helper_sh(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RR_SH_H: + gen_helper_sh_h(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RR_SHA: + gen_helper_sha(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_env, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RR_SHA_H: + gen_helper_sha_h(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RR_SHAS: + gen_shas(ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RR_XNOR: + tcg_gen_eqv_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); + break; + case 
OPC2_32_RR_XOR: + tcg_gen_xor_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); + break; + default: + generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); + } + tcg_temp_free(tcg_ctx, temp); +} + +static void decode_rr_address(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + uint32_t op2, n; + int r1, r2, r3; + TCGv temp; + + op2 = MASK_OP_RR_OP2(ctx->opcode); + r3 = MASK_OP_RR_D(ctx->opcode); + r2 = MASK_OP_RR_S2(ctx->opcode); + r1 = MASK_OP_RR_S1(ctx->opcode); + n = MASK_OP_RR_N(ctx->opcode); + + switch (op2) { + case OPC2_32_RR_ADD_A: + tcg_gen_add_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r3], tcg_ctx->cpu_gpr_a[r1], tcg_ctx->cpu_gpr_a[r2]); + break; + case OPC2_32_RR_ADDSC_A: + temp = tcg_temp_new(tcg_ctx); + tcg_gen_shli_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r1], n); + tcg_gen_add_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r3], tcg_ctx->cpu_gpr_a[r2], temp); + tcg_temp_free(tcg_ctx, temp); + break; + case OPC2_32_RR_ADDSC_AT: + temp = tcg_temp_new(tcg_ctx); + tcg_gen_sari_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r1], 3); + tcg_gen_add_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_a[r2], temp); + tcg_gen_andi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r3], temp, 0xFFFFFFFC); + tcg_temp_free(tcg_ctx, temp); + break; + case OPC2_32_RR_EQ_A: + tcg_gen_setcond_tl(tcg_ctx, TCG_COND_EQ, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_a[r1], + tcg_ctx->cpu_gpr_a[r2]); + break; + case OPC2_32_RR_EQZ: + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_a[r1], 0); + break; + case OPC2_32_RR_GE_A: + tcg_gen_setcond_tl(tcg_ctx, TCG_COND_GEU, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_a[r1], + tcg_ctx->cpu_gpr_a[r2]); + break; + case OPC2_32_RR_LT_A: + tcg_gen_setcond_tl(tcg_ctx, TCG_COND_LTU, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_a[r1], + tcg_ctx->cpu_gpr_a[r2]); + break; + case OPC2_32_RR_MOV_A: + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r3], tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RR_MOV_AA: + tcg_gen_mov_tl(tcg_ctx, 
tcg_ctx->cpu_gpr_a[r3], tcg_ctx->cpu_gpr_a[r2]); + break; + case OPC2_32_RR_MOV_D: + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_a[r2]); + break; + case OPC2_32_RR_NE_A: + tcg_gen_setcond_tl(tcg_ctx, TCG_COND_NE, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_a[r1], + tcg_ctx->cpu_gpr_a[r2]); + break; + case OPC2_32_RR_NEZ_A: + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_NE, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_a[r1], 0); + break; + case OPC2_32_RR_SUB_A: + tcg_gen_sub_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r3], tcg_ctx->cpu_gpr_a[r1], tcg_ctx->cpu_gpr_a[r2]); + break; + default: + generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); + } +} + +static void decode_rr_idirect(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + uint32_t op2; + int r1; + + op2 = MASK_OP_RR_OP2(ctx->opcode); + r1 = MASK_OP_RR_S1(ctx->opcode); + + switch (op2) { + case OPC2_32_RR_JI: + tcg_gen_andi_tl(tcg_ctx, tcg_ctx->cpu_PC, tcg_ctx->cpu_gpr_a[r1], ~0x1); + break; + case OPC2_32_RR_JLI: + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[11], ctx->pc_succ_insn); + tcg_gen_andi_tl(tcg_ctx, tcg_ctx->cpu_PC, tcg_ctx->cpu_gpr_a[r1], ~0x1); + break; + case OPC2_32_RR_CALLI: + gen_helper_1arg(tcg_ctx, call, ctx->pc_succ_insn); + tcg_gen_andi_tl(tcg_ctx, tcg_ctx->cpu_PC, tcg_ctx->cpu_gpr_a[r1], ~0x1); + break; + case OPC2_32_RR_FCALLI: + gen_fcall_save_ctx(ctx); + tcg_gen_andi_tl(tcg_ctx, tcg_ctx->cpu_PC, tcg_ctx->cpu_gpr_a[r1], ~0x1); + break; + default: + generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); + } + tcg_gen_exit_tb(tcg_ctx, NULL, 0); + ctx->base.is_jmp = DISAS_NORETURN; +} + +static void decode_rr_divide(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + uint32_t op2; + int r1, r2, r3; + + TCGv temp, temp2, temp3; + + op2 = MASK_OP_RR_OP2(ctx->opcode); + r3 = MASK_OP_RR_D(ctx->opcode); + r2 = MASK_OP_RR_S2(ctx->opcode); + r1 = MASK_OP_RR_S1(ctx->opcode); + + switch (op2) { + case OPC2_32_RR_BMERGE: + gen_helper_bmerge(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], 
tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RR_BSPLIT: + CHECK_REG_PAIR(r3); + gen_bsplit(ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1]); + break; + case OPC2_32_RR_DVINIT_B: + CHECK_REG_PAIR(r3); + gen_dvinit_b(ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], + tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RR_DVINIT_BU: + temp = tcg_temp_new(tcg_ctx); + temp2 = tcg_temp_new(tcg_ctx); + temp3 = tcg_temp_new(tcg_ctx); + CHECK_REG_PAIR(r3); + tcg_gen_shri_tl(tcg_ctx, temp3, tcg_ctx->cpu_gpr_d[r1], 8); + /* reset av */ + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, 0); + if (!has_feature(ctx, TRICORE_FEATURE_131)) { + /* overflow = (abs(D[r3+1]) >= abs(D[r2])) */ + tcg_gen_abs_tl(tcg_ctx, temp, temp3); + tcg_gen_abs_tl(tcg_ctx, temp2, tcg_ctx->cpu_gpr_d[r2]); + tcg_gen_setcond_tl(tcg_ctx, TCG_COND_GE, tcg_ctx->cpu_PSW_V, temp, temp2); + } else { + /* overflow = (D[b] == 0) */ + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, tcg_ctx->cpu_PSW_V, tcg_ctx->cpu_gpr_d[r2], 0); + } + tcg_gen_shli_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, tcg_ctx->cpu_PSW_V, 31); + /* sv */ + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_V); + /* write result */ + tcg_gen_shli_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], 24); + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r3+1], temp3); + + tcg_temp_free(tcg_ctx, temp); + tcg_temp_free(tcg_ctx, temp2); + tcg_temp_free(tcg_ctx, temp3); + break; + case OPC2_32_RR_DVINIT_H: + CHECK_REG_PAIR(r3); + gen_dvinit_h(ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], + tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RR_DVINIT_HU: + temp = tcg_temp_new(tcg_ctx); + temp2 = tcg_temp_new(tcg_ctx); + temp3 = tcg_temp_new(tcg_ctx); + CHECK_REG_PAIR(r3); + tcg_gen_shri_tl(tcg_ctx, temp3, tcg_ctx->cpu_gpr_d[r1], 16); + /* reset av */ + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, 0); + if 
(!has_feature(ctx, TRICORE_FEATURE_131)) { + /* overflow = (abs(D[r3+1]) >= abs(D[r2])) */ + tcg_gen_abs_tl(tcg_ctx, temp, temp3); + tcg_gen_abs_tl(tcg_ctx, temp2, tcg_ctx->cpu_gpr_d[r2]); + tcg_gen_setcond_tl(tcg_ctx, TCG_COND_GE, tcg_ctx->cpu_PSW_V, temp, temp2); + } else { + /* overflow = (D[b] == 0) */ + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, tcg_ctx->cpu_PSW_V, tcg_ctx->cpu_gpr_d[r2], 0); + } + tcg_gen_shli_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, tcg_ctx->cpu_PSW_V, 31); + /* sv */ + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_V); + /* write result */ + tcg_gen_shli_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], 16); + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r3+1], temp3); + tcg_temp_free(tcg_ctx, temp); + tcg_temp_free(tcg_ctx, temp2); + tcg_temp_free(tcg_ctx, temp3); + break; + case OPC2_32_RR_DVINIT: + temp = tcg_temp_new(tcg_ctx); + temp2 = tcg_temp_new(tcg_ctx); + CHECK_REG_PAIR(r3); + /* overflow = ((D[b] == 0) || + ((D[b] == 0xFFFFFFFF) && (D[a] == 0x80000000))) */ + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, temp, tcg_ctx->cpu_gpr_d[r2], 0xffffffff); + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, temp2, tcg_ctx->cpu_gpr_d[r1], 0x80000000); + tcg_gen_and_tl(tcg_ctx, temp, temp, temp2); + tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, temp2, tcg_ctx->cpu_gpr_d[r2], 0); + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, temp, temp2); + tcg_gen_shli_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, tcg_ctx->cpu_PSW_V, 31); + /* sv */ + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_V); + /* reset av */ + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, 0); + /* write result */ + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1]); + /* sign extend to high reg */ + tcg_gen_sari_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], 31); + tcg_temp_free(tcg_ctx, temp); + tcg_temp_free(tcg_ctx, temp2); + break; + case OPC2_32_RR_DVINIT_U: + /* overflow = (D[b] == 0) */ + 
tcg_gen_setcondi_tl(tcg_ctx, TCG_COND_EQ, tcg_ctx->cpu_PSW_V, tcg_ctx->cpu_gpr_d[r2], 0); + tcg_gen_shli_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, tcg_ctx->cpu_PSW_V, 31); + /* sv */ + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_V); + /* reset av */ + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, 0); + /* write result */ + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1]); + /* zero extend to high reg*/ + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r3+1], 0); + break; + case OPC2_32_RR_PARITY: + gen_helper_parity(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1]); + break; + case OPC2_32_RR_UNPACK: + CHECK_REG_PAIR(r3); + gen_unpack(ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1]); + break; + case OPC2_32_RR_CRC32: + if (has_feature(ctx, TRICORE_FEATURE_161)) { + gen_helper_crc32(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); + } else { + generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); + } + break; + case OPC2_32_RR_DIV: + if (has_feature(ctx, TRICORE_FEATURE_16)) { + GEN_HELPER_RR(tcg_ctx, divide, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], + tcg_ctx->cpu_gpr_d[r2]); + } else { + generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); + } + break; + case OPC2_32_RR_DIV_U: + if (has_feature(ctx, TRICORE_FEATURE_16)) { + GEN_HELPER_RR(tcg_ctx, divide_u, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], + tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); + } else { + generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); + } + break; + case OPC2_32_RR_MUL_F: + gen_helper_fmul(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_env, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RR_DIV_F: + gen_helper_fdiv(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_env, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RR_CMP_F: + gen_helper_fcmp(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_env, 
tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RR_FTOI: + gen_helper_ftoi(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_env, tcg_ctx->cpu_gpr_d[r1]); + break; + case OPC2_32_RR_ITOF: + gen_helper_itof(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_env, tcg_ctx->cpu_gpr_d[r1]); + break; + case OPC2_32_RR_FTOUZ: + gen_helper_ftouz(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_env, tcg_ctx->cpu_gpr_d[r1]); + break; + case OPC2_32_RR_UPDFL: + gen_helper_updfl(tcg_ctx, tcg_ctx->cpu_env, tcg_ctx->cpu_gpr_d[r1]); + break; + case OPC2_32_RR_UTOF: + gen_helper_utof(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_env, tcg_ctx->cpu_gpr_d[r1]); + break; + case OPC2_32_RR_FTOIZ: + gen_helper_ftoiz(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_env, tcg_ctx->cpu_gpr_d[r1]); + break; + case OPC2_32_RR_QSEED_F: + gen_helper_qseed(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_env, tcg_ctx->cpu_gpr_d[r1]); + break; + default: + generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); + } +} + +/* RR1 Format */ +static void decode_rr1_mul(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + uint32_t op2; + + int r1, r2, r3; + TCGv n; + TCGv_i64 temp64; + + r1 = MASK_OP_RR1_S1(ctx->opcode); + r2 = MASK_OP_RR1_S2(ctx->opcode); + r3 = MASK_OP_RR1_D(ctx->opcode); + n = tcg_const_i32(tcg_ctx, MASK_OP_RR1_N(ctx->opcode)); + op2 = MASK_OP_RR1_OP2(ctx->opcode); + + switch (op2) { + case OPC2_32_RR1_MUL_H_32_LL: + temp64 = tcg_temp_new_i64(tcg_ctx); + CHECK_REG_PAIR(r3); + GEN_HELPER_LL(tcg_ctx, mul_h, temp64, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n); + tcg_gen_extr_i64_i32(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], temp64); + gen_calc_usb_mul_h(ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1]); + tcg_temp_free_i64(tcg_ctx, temp64); + break; + case OPC2_32_RR1_MUL_H_32_LU: + temp64 = tcg_temp_new_i64(tcg_ctx); + CHECK_REG_PAIR(r3); + GEN_HELPER_LU(tcg_ctx, mul_h, temp64, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n); + 
tcg_gen_extr_i64_i32(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], temp64); + gen_calc_usb_mul_h(ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1]); + tcg_temp_free_i64(tcg_ctx, temp64); + break; + case OPC2_32_RR1_MUL_H_32_UL: + temp64 = tcg_temp_new_i64(tcg_ctx); + CHECK_REG_PAIR(r3); + GEN_HELPER_UL(tcg_ctx, mul_h, temp64, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n); + tcg_gen_extr_i64_i32(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], temp64); + gen_calc_usb_mul_h(ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1]); + tcg_temp_free_i64(tcg_ctx, temp64); + break; + case OPC2_32_RR1_MUL_H_32_UU: + temp64 = tcg_temp_new_i64(tcg_ctx); + CHECK_REG_PAIR(r3); + GEN_HELPER_UU(tcg_ctx, mul_h, temp64, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n); + tcg_gen_extr_i64_i32(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], temp64); + gen_calc_usb_mul_h(ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1]); + tcg_temp_free_i64(tcg_ctx, temp64); + break; + case OPC2_32_RR1_MULM_H_64_LL: + temp64 = tcg_temp_new_i64(tcg_ctx); + CHECK_REG_PAIR(r3); + GEN_HELPER_LL(tcg_ctx, mulm_h, temp64, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n); + tcg_gen_extr_i64_i32(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], temp64); + /* reset V bit */ + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, 0); + /* reset AV bit */ + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, tcg_ctx->cpu_PSW_V); + tcg_temp_free_i64(tcg_ctx, temp64); + break; + case OPC2_32_RR1_MULM_H_64_LU: + temp64 = tcg_temp_new_i64(tcg_ctx); + CHECK_REG_PAIR(r3); + GEN_HELPER_LU(tcg_ctx, mulm_h, temp64, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n); + tcg_gen_extr_i64_i32(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], temp64); + /* reset V bit */ + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, 0); + /* reset AV bit */ + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, tcg_ctx->cpu_PSW_V); + tcg_temp_free_i64(tcg_ctx, temp64); + break; + case 
OPC2_32_RR1_MULM_H_64_UL: + temp64 = tcg_temp_new_i64(tcg_ctx); + CHECK_REG_PAIR(r3); + GEN_HELPER_UL(tcg_ctx, mulm_h, temp64, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n); + tcg_gen_extr_i64_i32(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], temp64); + /* reset V bit */ + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, 0); + /* reset AV bit */ + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, tcg_ctx->cpu_PSW_V); + tcg_temp_free_i64(tcg_ctx, temp64); + break; + case OPC2_32_RR1_MULM_H_64_UU: + temp64 = tcg_temp_new_i64(tcg_ctx); + CHECK_REG_PAIR(r3); + GEN_HELPER_UU(tcg_ctx, mulm_h, temp64, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n); + tcg_gen_extr_i64_i32(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], temp64); + /* reset V bit */ + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, 0); + /* reset AV bit */ + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, tcg_ctx->cpu_PSW_V); + tcg_temp_free_i64(tcg_ctx, temp64); + + break; + case OPC2_32_RR1_MULR_H_16_LL: + GEN_HELPER_LL(tcg_ctx, mulr_h, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n); + gen_calc_usb_mulr_h(ctx, tcg_ctx->cpu_gpr_d[r3]); + break; + case OPC2_32_RR1_MULR_H_16_LU: + GEN_HELPER_LU(tcg_ctx, mulr_h, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n); + gen_calc_usb_mulr_h(ctx, tcg_ctx->cpu_gpr_d[r3]); + break; + case OPC2_32_RR1_MULR_H_16_UL: + GEN_HELPER_UL(tcg_ctx, mulr_h, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n); + gen_calc_usb_mulr_h(ctx, tcg_ctx->cpu_gpr_d[r3]); + break; + case OPC2_32_RR1_MULR_H_16_UU: + GEN_HELPER_UU(tcg_ctx, mulr_h, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n); + gen_calc_usb_mulr_h(ctx, tcg_ctx->cpu_gpr_d[r3]); + break; + default: + generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); + } + tcg_temp_free(tcg_ctx, n); +} + +static void decode_rr1_mulq(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + uint32_t op2; + int r1, r2, 
r3; + uint32_t n; + + TCGv temp, temp2; + + r1 = MASK_OP_RR1_S1(ctx->opcode); + r2 = MASK_OP_RR1_S2(ctx->opcode); + r3 = MASK_OP_RR1_D(ctx->opcode); + n = MASK_OP_RR1_N(ctx->opcode); + op2 = MASK_OP_RR1_OP2(ctx->opcode); + + temp = tcg_temp_new(tcg_ctx); + temp2 = tcg_temp_new(tcg_ctx); + + switch (op2) { + case OPC2_32_RR1_MUL_Q_32: + gen_mul_q(ctx, tcg_ctx->cpu_gpr_d[r3], temp, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, 32); + break; + case OPC2_32_RR1_MUL_Q_64: + CHECK_REG_PAIR(r3); + gen_mul_q(ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], + n, 0); + break; + case OPC2_32_RR1_MUL_Q_32_L: + tcg_gen_ext16s_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r2]); + gen_mul_q(ctx, tcg_ctx->cpu_gpr_d[r3], temp, tcg_ctx->cpu_gpr_d[r1], temp, n, 16); + break; + case OPC2_32_RR1_MUL_Q_64_L: + CHECK_REG_PAIR(r3); + tcg_gen_ext16s_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r2]); + gen_mul_q(ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], temp, n, 0); + break; + case OPC2_32_RR1_MUL_Q_32_U: + tcg_gen_sari_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r2], 16); + gen_mul_q(ctx, tcg_ctx->cpu_gpr_d[r3], temp, tcg_ctx->cpu_gpr_d[r1], temp, n, 16); + break; + case OPC2_32_RR1_MUL_Q_64_U: + CHECK_REG_PAIR(r3); + tcg_gen_sari_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r2], 16); + gen_mul_q(ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], temp, n, 0); + break; + case OPC2_32_RR1_MUL_Q_32_LL: + tcg_gen_ext16s_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r1]); + tcg_gen_ext16s_tl(tcg_ctx, temp2, tcg_ctx->cpu_gpr_d[r2]); + gen_mul_q_16(ctx, tcg_ctx->cpu_gpr_d[r3], temp, temp2, n); + break; + case OPC2_32_RR1_MUL_Q_32_UU: + tcg_gen_sari_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r1], 16); + tcg_gen_sari_tl(tcg_ctx, temp2, tcg_ctx->cpu_gpr_d[r2], 16); + gen_mul_q_16(ctx, tcg_ctx->cpu_gpr_d[r3], temp, temp2, n); + break; + case OPC2_32_RR1_MULR_Q_32_L: + tcg_gen_ext16s_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r1]); 
+ tcg_gen_ext16s_tl(tcg_ctx, temp2, tcg_ctx->cpu_gpr_d[r2]); + gen_mulr_q(ctx, tcg_ctx->cpu_gpr_d[r3], temp, temp2, n); + break; + case OPC2_32_RR1_MULR_Q_32_U: + tcg_gen_sari_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r1], 16); + tcg_gen_sari_tl(tcg_ctx, temp2, tcg_ctx->cpu_gpr_d[r2], 16); + gen_mulr_q(ctx, tcg_ctx->cpu_gpr_d[r3], temp, temp2, n); + break; + default: + generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); + } + tcg_temp_free(tcg_ctx, temp); + tcg_temp_free(tcg_ctx, temp2); +} + +/* RR2 format */ +static void decode_rr2_mul(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + uint32_t op2; + int r1, r2, r3; + + op2 = MASK_OP_RR2_OP2(ctx->opcode); + r1 = MASK_OP_RR2_S1(ctx->opcode); + r2 = MASK_OP_RR2_S2(ctx->opcode); + r3 = MASK_OP_RR2_D(ctx->opcode); + switch (op2) { + case OPC2_32_RR2_MUL_32: + gen_mul_i32s(ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RR2_MUL_64: + CHECK_REG_PAIR(r3); + gen_mul_i64s(ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], + tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RR2_MULS_32: + gen_helper_mul_ssov(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_env, tcg_ctx->cpu_gpr_d[r1], + tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RR2_MUL_U_64: + CHECK_REG_PAIR(r3); + gen_mul_i64u(ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], + tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RR2_MULS_U_32: + gen_helper_mul_suov(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_env, tcg_ctx->cpu_gpr_d[r1], + tcg_ctx->cpu_gpr_d[r2]); + break; + default: + generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); + } +} + +/* RRPW format */ +static void decode_rrpw_extract_insert(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + uint32_t op2; + int r1, r2, r3; + int32_t pos, width; + TCGv temp; + + op2 = MASK_OP_RRPW_OP2(ctx->opcode); + r1 = MASK_OP_RRPW_S1(ctx->opcode); + r2 = MASK_OP_RRPW_S2(ctx->opcode); + r3 = 
MASK_OP_RRPW_D(ctx->opcode); + pos = MASK_OP_RRPW_POS(ctx->opcode); + width = MASK_OP_RRPW_WIDTH(ctx->opcode); + + switch (op2) { + case OPC2_32_RRPW_EXTR: + if (width == 0) { + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], 0); + break; + } + + if (pos + width <= 32) { + /* optimize special cases */ + if ((pos == 0) && (width == 8)) { + tcg_gen_ext8s_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1]); + } else if ((pos == 0) && (width == 16)) { + tcg_gen_ext16s_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1]); + } else { + tcg_gen_shli_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], 32 - pos - width); + tcg_gen_sari_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3], 32 - width); + } + } + break; + case OPC2_32_RRPW_EXTR_U: + if (width == 0) { + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], 0); + } else { + tcg_gen_shri_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], pos); + tcg_gen_andi_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3], ~0u >> (32-width)); + } + break; + case OPC2_32_RRPW_IMASK: + CHECK_REG_PAIR(r3); + + if (pos + width <= 32) { + temp = tcg_temp_new(tcg_ctx); + tcg_gen_movi_tl(tcg_ctx, temp, ((1u << width) - 1) << pos); + tcg_gen_shli_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r2], pos); + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r3 + 1], temp); + tcg_temp_free(tcg_ctx, temp); + } + + break; + case OPC2_32_RRPW_INSERT: + if (pos + width <= 32) { + tcg_gen_deposit_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], + pos, width); + } + break; + default: + generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); + } +} + +/* RRR format */ +static void decode_rrr_cond_select(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + uint32_t op2; + int r1, r2, r3, r4; + TCGv temp; + + op2 = MASK_OP_RRR_OP2(ctx->opcode); + r1 = MASK_OP_RRR_S1(ctx->opcode); + r2 = MASK_OP_RRR_S2(ctx->opcode); + r3 = MASK_OP_RRR_S3(ctx->opcode); + r4 = 
MASK_OP_RRR_D(ctx->opcode); + + switch (op2) { + case OPC2_32_RRR_CADD: + gen_cond_add(ctx, TCG_COND_NE, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], + tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3]); + break; + case OPC2_32_RRR_CADDN: + gen_cond_add(ctx, TCG_COND_EQ, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r4], + tcg_ctx->cpu_gpr_d[r3]); + break; + case OPC2_32_RRR_CSUB: + gen_cond_sub(ctx, TCG_COND_NE, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r4], + tcg_ctx->cpu_gpr_d[r3]); + break; + case OPC2_32_RRR_CSUBN: + gen_cond_sub(ctx, TCG_COND_EQ, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r4], + tcg_ctx->cpu_gpr_d[r3]); + break; + case OPC2_32_RRR_SEL: + temp = tcg_const_i32(tcg_ctx, 0); + tcg_gen_movcond_tl(tcg_ctx, TCG_COND_NE, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], temp, + tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); + tcg_temp_free(tcg_ctx, temp); + break; + case OPC2_32_RRR_SELN: + temp = tcg_const_i32(tcg_ctx, 0); + tcg_gen_movcond_tl(tcg_ctx, TCG_COND_EQ, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], temp, + tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2]); + tcg_temp_free(tcg_ctx, temp); + break; + default: + generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); + } +} + +static void decode_rrr_divide(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + uint32_t op2; + + int r1, r2, r3, r4; + + op2 = MASK_OP_RRR_OP2(ctx->opcode); + r1 = MASK_OP_RRR_S1(ctx->opcode); + r2 = MASK_OP_RRR_S2(ctx->opcode); + r3 = MASK_OP_RRR_S3(ctx->opcode); + r4 = MASK_OP_RRR_D(ctx->opcode); + + switch (op2) { + case OPC2_32_RRR_DVADJ: + CHECK_REG_PAIR(r3); + CHECK_REG_PAIR(r4); + GEN_HELPER_RRR(tcg_ctx, dvadj, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], + tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RRR_DVSTEP: + CHECK_REG_PAIR(r3); + CHECK_REG_PAIR(r4); + GEN_HELPER_RRR(tcg_ctx, dvstep, tcg_ctx->cpu_gpr_d[r4], 
tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], + tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RRR_DVSTEP_U: + CHECK_REG_PAIR(r3); + CHECK_REG_PAIR(r4); + GEN_HELPER_RRR(tcg_ctx, dvstep_u, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], + tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RRR_IXMAX: + CHECK_REG_PAIR(r3); + CHECK_REG_PAIR(r4); + GEN_HELPER_RRR(tcg_ctx, ixmax, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], + tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RRR_IXMAX_U: + CHECK_REG_PAIR(r3); + CHECK_REG_PAIR(r4); + GEN_HELPER_RRR(tcg_ctx, ixmax_u, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], + tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RRR_IXMIN: + CHECK_REG_PAIR(r3); + CHECK_REG_PAIR(r4); + GEN_HELPER_RRR(tcg_ctx, ixmin, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], + tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RRR_IXMIN_U: + CHECK_REG_PAIR(r3); + CHECK_REG_PAIR(r4); + GEN_HELPER_RRR(tcg_ctx, ixmin_u, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], + tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RRR_PACK: + CHECK_REG_PAIR(r3); + gen_helper_pack(tcg_ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_PSW_C, tcg_ctx->cpu_gpr_d[r3], + tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1]); + break; + case OPC2_32_RRR_ADD_F: + gen_helper_fadd(tcg_ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_env, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r3]); + break; + case OPC2_32_RRR_SUB_F: + gen_helper_fsub(tcg_ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_env, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r3]); + break; + case OPC2_32_RRR_MADD_F: + gen_helper_fmadd(tcg_ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_env, tcg_ctx->cpu_gpr_d[r1], + tcg_ctx->cpu_gpr_d[r2], 
tcg_ctx->cpu_gpr_d[r3]); + break; + case OPC2_32_RRR_MSUB_F: + gen_helper_fmsub(tcg_ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_env, tcg_ctx->cpu_gpr_d[r1], + tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r3]); + break; + default: + generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); + } +} + +/* RRR2 format */ +static void decode_rrr2_madd(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + uint32_t op2; + uint32_t r1, r2, r3, r4; + + op2 = MASK_OP_RRR2_OP2(ctx->opcode); + r1 = MASK_OP_RRR2_S1(ctx->opcode); + r2 = MASK_OP_RRR2_S2(ctx->opcode); + r3 = MASK_OP_RRR2_S3(ctx->opcode); + r4 = MASK_OP_RRR2_D(ctx->opcode); + switch (op2) { + case OPC2_32_RRR2_MADD_32: + gen_madd32_d(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r3], + tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RRR2_MADD_64: + CHECK_REG_PAIR(r4); + CHECK_REG_PAIR(r3); + gen_madd64_d(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r1], + tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RRR2_MADDS_32: + gen_helper_madd32_ssov(tcg_ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_env, tcg_ctx->cpu_gpr_d[r1], + tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RRR2_MADDS_64: + CHECK_REG_PAIR(r4); + CHECK_REG_PAIR(r3); + gen_madds_64(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r1], + tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RRR2_MADD_U_64: + CHECK_REG_PAIR(r4); + CHECK_REG_PAIR(r3); + gen_maddu64_d(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r1], + tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RRR2_MADDS_U_32: + gen_helper_madd32_suov(tcg_ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_env, tcg_ctx->cpu_gpr_d[r1], + tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RRR2_MADDS_U_64: + 
CHECK_REG_PAIR(r4); + CHECK_REG_PAIR(r3); + gen_maddsu_64(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r1], + tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r2]); + break; + default: + generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); + } +} + +static void decode_rrr2_msub(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + uint32_t op2; + uint32_t r1, r2, r3, r4; + + op2 = MASK_OP_RRR2_OP2(ctx->opcode); + r1 = MASK_OP_RRR2_S1(ctx->opcode); + r2 = MASK_OP_RRR2_S2(ctx->opcode); + r3 = MASK_OP_RRR2_S3(ctx->opcode); + r4 = MASK_OP_RRR2_D(ctx->opcode); + + switch (op2) { + case OPC2_32_RRR2_MSUB_32: + gen_msub32_d(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r3], + tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RRR2_MSUB_64: + CHECK_REG_PAIR(r4); + CHECK_REG_PAIR(r3); + gen_msub64_d(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r1], + tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RRR2_MSUBS_32: + gen_helper_msub32_ssov(tcg_ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_env, tcg_ctx->cpu_gpr_d[r1], + tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RRR2_MSUBS_64: + CHECK_REG_PAIR(r4); + CHECK_REG_PAIR(r3); + gen_msubs_64(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r1], + tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RRR2_MSUB_U_64: + gen_msubu64_d(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r1], + tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RRR2_MSUBS_U_32: + gen_helper_msub32_suov(tcg_ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_env, tcg_ctx->cpu_gpr_d[r1], + tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r2]); + break; + case OPC2_32_RRR2_MSUBS_U_64: + CHECK_REG_PAIR(r4); + CHECK_REG_PAIR(r3); + gen_msubsu_64(ctx, 
tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r1], + tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r2]); + break; + default: + generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); + } +} + +/* RRR1 format */ +static void decode_rrr1_madd(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + uint32_t op2; + uint32_t r1, r2, r3, r4, n; + + op2 = MASK_OP_RRR1_OP2(ctx->opcode); + r1 = MASK_OP_RRR1_S1(ctx->opcode); + r2 = MASK_OP_RRR1_S2(ctx->opcode); + r3 = MASK_OP_RRR1_S3(ctx->opcode); + r4 = MASK_OP_RRR1_D(ctx->opcode); + n = MASK_OP_RRR1_N(ctx->opcode); + + switch (op2) { + case OPC2_32_RRR1_MADD_H_LL: + CHECK_REG_PAIR(r4); + CHECK_REG_PAIR(r3); + gen_madd_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], + tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_LL); + break; + case OPC2_32_RRR1_MADD_H_LU: + CHECK_REG_PAIR(r4); + CHECK_REG_PAIR(r3); + gen_madd_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], + tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_LU); + break; + case OPC2_32_RRR1_MADD_H_UL: + CHECK_REG_PAIR(r4); + CHECK_REG_PAIR(r3); + gen_madd_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], + tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_UL); + break; + case OPC2_32_RRR1_MADD_H_UU: + CHECK_REG_PAIR(r4); + CHECK_REG_PAIR(r3); + gen_madd_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], + tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_UU); + break; + case OPC2_32_RRR1_MADDS_H_LL: + CHECK_REG_PAIR(r4); + CHECK_REG_PAIR(r3); + gen_madds_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], + tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_LL); + break; + case OPC2_32_RRR1_MADDS_H_LU: + 
CHECK_REG_PAIR(r4); + CHECK_REG_PAIR(r3); + gen_madds_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], + tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_LU); + break; + case OPC2_32_RRR1_MADDS_H_UL: + CHECK_REG_PAIR(r4); + CHECK_REG_PAIR(r3); + gen_madds_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], + tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_UL); + break; + case OPC2_32_RRR1_MADDS_H_UU: + CHECK_REG_PAIR(r4); + CHECK_REG_PAIR(r3); + gen_madds_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], + tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_UU); + break; + case OPC2_32_RRR1_MADDM_H_LL: + CHECK_REG_PAIR(r4); + CHECK_REG_PAIR(r3); + gen_maddm_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], + tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_LL); + break; + case OPC2_32_RRR1_MADDM_H_LU: + CHECK_REG_PAIR(r4); + CHECK_REG_PAIR(r3); + gen_maddm_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], + tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_LU); + break; + case OPC2_32_RRR1_MADDM_H_UL: + CHECK_REG_PAIR(r4); + CHECK_REG_PAIR(r3); + gen_maddm_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], + tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_UL); + break; + case OPC2_32_RRR1_MADDM_H_UU: + CHECK_REG_PAIR(r4); + CHECK_REG_PAIR(r3); + gen_maddm_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], + tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_UU); + break; + case OPC2_32_RRR1_MADDMS_H_LL: + CHECK_REG_PAIR(r4); + CHECK_REG_PAIR(r3); + gen_maddms_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], 
tcg_ctx->cpu_gpr_d[r3], + tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_LL); + break; + case OPC2_32_RRR1_MADDMS_H_LU: + CHECK_REG_PAIR(r4); + CHECK_REG_PAIR(r3); + gen_maddms_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], + tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_LU); + break; + case OPC2_32_RRR1_MADDMS_H_UL: + CHECK_REG_PAIR(r4); + CHECK_REG_PAIR(r3); + gen_maddms_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], + tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_UL); + break; + case OPC2_32_RRR1_MADDMS_H_UU: + CHECK_REG_PAIR(r4); + CHECK_REG_PAIR(r3); + gen_maddms_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], + tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_UU); + break; + case OPC2_32_RRR1_MADDR_H_LL: + gen_maddr32_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], + tcg_ctx->cpu_gpr_d[r2], n, MODE_LL); + break; + case OPC2_32_RRR1_MADDR_H_LU: + gen_maddr32_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], + tcg_ctx->cpu_gpr_d[r2], n, MODE_LU); + break; + case OPC2_32_RRR1_MADDR_H_UL: + gen_maddr32_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], + tcg_ctx->cpu_gpr_d[r2], n, MODE_UL); + break; + case OPC2_32_RRR1_MADDR_H_UU: + gen_maddr32_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], + tcg_ctx->cpu_gpr_d[r2], n, MODE_UU); + break; + case OPC2_32_RRR1_MADDRS_H_LL: + gen_maddr32s_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], + tcg_ctx->cpu_gpr_d[r2], n, MODE_LL); + break; + case OPC2_32_RRR1_MADDRS_H_LU: + gen_maddr32s_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], + tcg_ctx->cpu_gpr_d[r2], n, MODE_LU); + break; + case 
OPC2_32_RRR1_MADDRS_H_UL: + gen_maddr32s_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], + tcg_ctx->cpu_gpr_d[r2], n, MODE_UL); + break; + case OPC2_32_RRR1_MADDRS_H_UU: + gen_maddr32s_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], + tcg_ctx->cpu_gpr_d[r2], n, MODE_UU); + break; + default: + generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); + } +} + +static void decode_rrr1_maddq_h(DisasContext *ctx) +{ + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + + uint32_t op2; + uint32_t r1, r2, r3, r4, n; + TCGv temp, temp2; + + op2 = MASK_OP_RRR1_OP2(ctx->opcode); + r1 = MASK_OP_RRR1_S1(ctx->opcode); + r2 = MASK_OP_RRR1_S2(ctx->opcode); + r3 = MASK_OP_RRR1_S3(ctx->opcode); + r4 = MASK_OP_RRR1_D(ctx->opcode); + n = MASK_OP_RRR1_N(ctx->opcode); + + temp = tcg_const_i32(tcg_ctx, n); + temp2 = tcg_temp_new(tcg_ctx); + + switch (op2) { + case OPC2_32_RRR1_MADD_Q_32: + gen_madd32_q(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], + tcg_ctx->cpu_gpr_d[r2], n, 32); + break; + case OPC2_32_RRR1_MADD_Q_64: + CHECK_REG_PAIR(r4); + CHECK_REG_PAIR(r3); + gen_madd64_q(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], + tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], + n); + break; + case OPC2_32_RRR1_MADD_Q_32_L: + tcg_gen_ext16s_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r2]); + gen_madd32_q(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], + temp, n, 16); + break; + case OPC2_32_RRR1_MADD_Q_64_L: + CHECK_REG_PAIR(r4); + CHECK_REG_PAIR(r3); + tcg_gen_ext16s_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r2]); + gen_madd64_q(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], + tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], temp, + n); + break; + case OPC2_32_RRR1_MADD_Q_32_U: + tcg_gen_sari_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r2], 16); + gen_madd32_q(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], 
tcg_ctx->cpu_gpr_d[r1], + temp, n, 16); + break; + case OPC2_32_RRR1_MADD_Q_64_U: + CHECK_REG_PAIR(r4); + CHECK_REG_PAIR(r3); + tcg_gen_sari_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r2], 16); + gen_madd64_q(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], + tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], temp, + n); + break; + case OPC2_32_RRR1_MADD_Q_32_LL: + tcg_gen_ext16s_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r1]); + tcg_gen_ext16s_tl(tcg_ctx, temp2, tcg_ctx->cpu_gpr_d[r2]); + gen_m16add32_q(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], temp, temp2, n); + break; + case OPC2_32_RRR1_MADD_Q_64_LL: + CHECK_REG_PAIR(r4); + CHECK_REG_PAIR(r3); + tcg_gen_ext16s_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r1]); + tcg_gen_ext16s_tl(tcg_ctx, temp2, tcg_ctx->cpu_gpr_d[r2]); + gen_m16add64_q(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], + tcg_ctx->cpu_gpr_d[r3+1], temp, temp2, n); + break; + case OPC2_32_RRR1_MADD_Q_32_UU: + tcg_gen_sari_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r1], 16); + tcg_gen_sari_tl(tcg_ctx, temp2, tcg_ctx->cpu_gpr_d[r2], 16); + gen_m16add32_q(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], temp, temp2, n); + break; + case OPC2_32_RRR1_MADD_Q_64_UU: + CHECK_REG_PAIR(r4); + CHECK_REG_PAIR(r3); + tcg_gen_sari_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r1], 16); + tcg_gen_sari_tl(tcg_ctx, temp2, tcg_ctx->cpu_gpr_d[r2], 16); + gen_m16add64_q(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], + tcg_ctx->cpu_gpr_d[r3+1], temp, temp2, n); + break; + case OPC2_32_RRR1_MADDS_Q_32: + gen_madds32_q(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], + tcg_ctx->cpu_gpr_d[r2], n, 32); + break; + case OPC2_32_RRR1_MADDS_Q_64: + CHECK_REG_PAIR(r4); + CHECK_REG_PAIR(r3); + gen_madds64_q(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], + tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], + n); + break; + case 
OPC2_32_RRR1_MADDS_Q_32_L: + tcg_gen_ext16s_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r2]); + gen_madds32_q(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], + temp, n, 16); + break; + case OPC2_32_RRR1_MADDS_Q_64_L: + CHECK_REG_PAIR(r4); + CHECK_REG_PAIR(r3); + tcg_gen_ext16s_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r2]); + gen_madds64_q(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], + tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], temp, + n); + break; + case OPC2_32_RRR1_MADDS_Q_32_U: + tcg_gen_sari_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r2], 16); + gen_madds32_q(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], + temp, n, 16); + break; + case OPC2_32_RRR1_MADDS_Q_64_U: + CHECK_REG_PAIR(r4); + CHECK_REG_PAIR(r3); + tcg_gen_sari_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r2], 16); + gen_madds64_q(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], + tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], temp, + n); + break; + case OPC2_32_RRR1_MADDS_Q_32_LL: + tcg_gen_ext16s_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r1]); + tcg_gen_ext16s_tl(tcg_ctx, temp2, tcg_ctx->cpu_gpr_d[r2]); + gen_m16adds32_q(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], temp, temp2, n); + break; + case OPC2_32_RRR1_MADDS_Q_64_LL: + CHECK_REG_PAIR(r4); + CHECK_REG_PAIR(r3); + tcg_gen_ext16s_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r1]); + tcg_gen_ext16s_tl(tcg_ctx, temp2, tcg_ctx->cpu_gpr_d[r2]); + gen_m16adds64_q(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3], + tcg_ctx->cpu_gpr_d[r3+1], temp, temp2, n); + break; + case OPC2_32_RRR1_MADDS_Q_32_UU: + tcg_gen_sari_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r1], 16); + tcg_gen_sari_tl(tcg_ctx, temp2, tcg_ctx->cpu_gpr_d[r2], 16); + gen_m16adds32_q(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], temp, temp2, n); + break; + case OPC2_32_RRR1_MADDS_Q_64_UU: + CHECK_REG_PAIR(r4); + CHECK_REG_PAIR(r3); + tcg_gen_sari_tl(tcg_ctx, 
temp, tcg_ctx->cpu_gpr_d[r1], 16);
        tcg_gen_sari_tl(tcg_ctx, temp2, tcg_ctx->cpu_gpr_d[r2], 16);
        gen_m16adds64_q(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3],
                        tcg_ctx->cpu_gpr_d[r3+1], temp, temp2, n);
        break;
    case OPC2_32_RRR1_MADDR_H_64_UL:
        CHECK_REG_PAIR(r3);
        gen_maddr64_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1],
                      tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, 2);
        break;
    case OPC2_32_RRR1_MADDRS_H_64_UL:
        CHECK_REG_PAIR(r3);
        gen_maddr64s_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1],
                       tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, 2);
        break;
    /*
     * MADDR.Q/MADDRS.Q: the _LL variants take the low halfwords of
     * D[r1]/D[r2] (sign-extended), the _UU variants the high halfwords
     * (arithmetic shift right by 16).
     */
    case OPC2_32_RRR1_MADDR_Q_32_LL:
        tcg_gen_ext16s_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r1]);
        tcg_gen_ext16s_tl(tcg_ctx, temp2, tcg_ctx->cpu_gpr_d[r2]);
        gen_maddr_q(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], temp, temp2, n);
        break;
    case OPC2_32_RRR1_MADDR_Q_32_UU:
        tcg_gen_sari_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r1], 16);
        tcg_gen_sari_tl(tcg_ctx, temp2, tcg_ctx->cpu_gpr_d[r2], 16);
        gen_maddr_q(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], temp, temp2, n);
        break;
    case OPC2_32_RRR1_MADDRS_Q_32_LL:
        tcg_gen_ext16s_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r1]);
        tcg_gen_ext16s_tl(tcg_ctx, temp2, tcg_ctx->cpu_gpr_d[r2]);
        gen_maddrs_q(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], temp, temp2, n);
        break;
    case OPC2_32_RRR1_MADDRS_Q_32_UU:
        tcg_gen_sari_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r1], 16);
        tcg_gen_sari_tl(tcg_ctx, temp2, tcg_ctx->cpu_gpr_d[r2], 16);
        gen_maddrs_q(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], temp, temp2, n);
        break;
    default:
        /* unknown sub-opcode: emit an illegal-opcode trap */
        generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
    }
    tcg_temp_free(tcg_ctx, temp);
    tcg_temp_free(tcg_ctx, temp2);
}

/*
 * Decode the RRR1-format MADDSU.H instruction group.  All variants do a
 * packed-halfword multiply where one product is added to and the other
 * subtracted from the accumulator register pair E[r3], writing E[r4]
 * (hence CHECK_REG_PAIR on both).  op2 selects the variant (MADDSUS* =
 * saturating, MADDSUM* = 64-bit, MADDSUR* = rounded 32-bit result, which
 * takes single registers); the MODE_LL/LU/UL/UU argument selects which
 * halfwords of D[r1]/D[r2] feed the multiplies, and n is the shift count
 * field of the opcode.
 */
static void decode_rrr1_maddsu_h(DisasContext *ctx)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;

    uint32_t op2;
    uint32_t r1, r2, r3, r4, n;

    /* crack the RRR1 fields out of the 32-bit opcode */
    op2 = MASK_OP_RRR1_OP2(ctx->opcode);
    r1 = MASK_OP_RRR1_S1(ctx->opcode);
    r2 = MASK_OP_RRR1_S2(ctx->opcode);
    r3 = MASK_OP_RRR1_S3(ctx->opcode);
    r4 = MASK_OP_RRR1_D(ctx->opcode);
    n = MASK_OP_RRR1_N(ctx->opcode);

    switch (op2) {
    case OPC2_32_RRR1_MADDSU_H_32_LL:
        CHECK_REG_PAIR(r4);
        CHECK_REG_PAIR(r3);
        gen_maddsu_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3],
                     tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_LL);
        break;
    case OPC2_32_RRR1_MADDSU_H_32_LU:
        CHECK_REG_PAIR(r4);
        CHECK_REG_PAIR(r3);
        gen_maddsu_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3],
                     tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_LU);
        break;
    case OPC2_32_RRR1_MADDSU_H_32_UL:
        CHECK_REG_PAIR(r4);
        CHECK_REG_PAIR(r3);
        gen_maddsu_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3],
                     tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_UL);
        break;
    case OPC2_32_RRR1_MADDSU_H_32_UU:
        CHECK_REG_PAIR(r4);
        CHECK_REG_PAIR(r3);
        gen_maddsu_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3],
                     tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_UU);
        break;
    /* saturating variants */
    case OPC2_32_RRR1_MADDSUS_H_32_LL:
        CHECK_REG_PAIR(r4);
        CHECK_REG_PAIR(r3);
        gen_maddsus_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3],
                      tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2],
                      n, MODE_LL);
        break;
    case OPC2_32_RRR1_MADDSUS_H_32_LU:
        CHECK_REG_PAIR(r4);
        CHECK_REG_PAIR(r3);
        gen_maddsus_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3],
                      tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2],
                      n, MODE_LU);
        break;
    case OPC2_32_RRR1_MADDSUS_H_32_UL:
        CHECK_REG_PAIR(r4);
        CHECK_REG_PAIR(r3);
        gen_maddsus_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3],
                      tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2],
                      n, MODE_UL);
        break;
    case OPC2_32_RRR1_MADDSUS_H_32_UU:
        CHECK_REG_PAIR(r4);
        CHECK_REG_PAIR(r3);
        gen_maddsus_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3],
                      tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2],
                      n, MODE_UU);
        break;
    /* 64-bit-result variants */
    case OPC2_32_RRR1_MADDSUM_H_64_LL:
        CHECK_REG_PAIR(r4);
        CHECK_REG_PAIR(r3);
        gen_maddsum_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3],
                      tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2],
                      n, MODE_LL);
        break;
    case OPC2_32_RRR1_MADDSUM_H_64_LU:
        CHECK_REG_PAIR(r4);
        CHECK_REG_PAIR(r3);
        gen_maddsum_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3],
                      tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2],
                      n, MODE_LU);
        break;
    case OPC2_32_RRR1_MADDSUM_H_64_UL:
        CHECK_REG_PAIR(r4);
        CHECK_REG_PAIR(r3);
        gen_maddsum_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3],
                      tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2],
                      n, MODE_UL);
        break;
    case OPC2_32_RRR1_MADDSUM_H_64_UU:
        CHECK_REG_PAIR(r4);
        CHECK_REG_PAIR(r3);
        gen_maddsum_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3],
                      tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2],
                      n, MODE_UU);
        break;
    case OPC2_32_RRR1_MADDSUMS_H_64_LL:
        CHECK_REG_PAIR(r4);
        CHECK_REG_PAIR(r3);
        gen_maddsums_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3],
                       tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2],
                       n, MODE_LL);
        break;
    case OPC2_32_RRR1_MADDSUMS_H_64_LU:
        CHECK_REG_PAIR(r4);
        CHECK_REG_PAIR(r3);
        gen_maddsums_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3],
                       tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2],
                       n, MODE_LU);
        break;
    case OPC2_32_RRR1_MADDSUMS_H_64_UL:
        CHECK_REG_PAIR(r4);
        CHECK_REG_PAIR(r3);
        gen_maddsums_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3],
                       tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2],
                       n, MODE_UL);
        break;
    case OPC2_32_RRR1_MADDSUMS_H_64_UU:
        CHECK_REG_PAIR(r4);
        CHECK_REG_PAIR(r3);
        gen_maddsums_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3],
                       tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2],
                       n, MODE_UU);
        break;
    /* rounded 32-bit-result variants: single source/dest registers */
    case OPC2_32_RRR1_MADDSUR_H_16_LL:
        gen_maddsur32_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1],
                        tcg_ctx->cpu_gpr_d[r2], n, MODE_LL);
        break;
    case OPC2_32_RRR1_MADDSUR_H_16_LU:
        gen_maddsur32_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1],
                        tcg_ctx->cpu_gpr_d[r2], n, MODE_LU);
        break;
    case OPC2_32_RRR1_MADDSUR_H_16_UL:
        gen_maddsur32_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1],
                        tcg_ctx->cpu_gpr_d[r2], n, MODE_UL);
        break;
    case OPC2_32_RRR1_MADDSUR_H_16_UU:
        gen_maddsur32_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1],
                        tcg_ctx->cpu_gpr_d[r2], n, MODE_UU);
        break;
    case OPC2_32_RRR1_MADDSURS_H_16_LL:
        gen_maddsur32s_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1],
                         tcg_ctx->cpu_gpr_d[r2], n, MODE_LL);
        break;
    case OPC2_32_RRR1_MADDSURS_H_16_LU:
        gen_maddsur32s_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1],
                         tcg_ctx->cpu_gpr_d[r2], n, MODE_LU);
        break;
    case OPC2_32_RRR1_MADDSURS_H_16_UL:
        gen_maddsur32s_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1],
                         tcg_ctx->cpu_gpr_d[r2], n, MODE_UL);
        break;
    case OPC2_32_RRR1_MADDSURS_H_16_UU:
        gen_maddsur32s_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1],
                         tcg_ctx->cpu_gpr_d[r2], n, MODE_UU);
        break;
    default:
        /* unknown sub-opcode: emit an illegal-opcode trap */
        generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
    }
}

/*
 * Decode the RRR1-format MSUB.H instruction group: packed-halfword
 * multiply-subtract from the accumulator pair E[r3] into E[r4]
 * (MSUBS* saturating, MSUBM* 64-bit, MSUBR* rounded 32-bit which uses
 * single registers).  MODE_* selects the halfword combination, n is the
 * opcode shift-count field.
 */
static void decode_rrr1_msub(DisasContext *ctx)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;

    uint32_t op2;
    uint32_t r1, r2, r3, r4, n;

    op2 = MASK_OP_RRR1_OP2(ctx->opcode);
    r1 = MASK_OP_RRR1_S1(ctx->opcode);
    r2 = MASK_OP_RRR1_S2(ctx->opcode);
    r3 = MASK_OP_RRR1_S3(ctx->opcode);
    r4 = MASK_OP_RRR1_D(ctx->opcode);
    n = MASK_OP_RRR1_N(ctx->opcode);

    switch (op2) {
    case OPC2_32_RRR1_MSUB_H_LL:
        CHECK_REG_PAIR(r4);
        CHECK_REG_PAIR(r3);
        gen_msub_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3],
                   tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_LL);
        break;
    case OPC2_32_RRR1_MSUB_H_LU:
        CHECK_REG_PAIR(r4);
        CHECK_REG_PAIR(r3);
        gen_msub_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3],
                   tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_LU);
        break;
    case OPC2_32_RRR1_MSUB_H_UL:
        CHECK_REG_PAIR(r4);
        CHECK_REG_PAIR(r3);
        gen_msub_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3],
                   tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_UL);
        break;
    case OPC2_32_RRR1_MSUB_H_UU:
        CHECK_REG_PAIR(r4);
        CHECK_REG_PAIR(r3);
        gen_msub_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3],
                   tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_UU);
        break;
    case OPC2_32_RRR1_MSUBS_H_LL:
        CHECK_REG_PAIR(r4);
        CHECK_REG_PAIR(r3);
        gen_msubs_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3],
                    tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_LL);
        break;
    case OPC2_32_RRR1_MSUBS_H_LU:
        CHECK_REG_PAIR(r4);
        CHECK_REG_PAIR(r3);
        gen_msubs_h(ctx,
tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3],
                    tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_LU);
        break;
    case OPC2_32_RRR1_MSUBS_H_UL:
        CHECK_REG_PAIR(r4);
        CHECK_REG_PAIR(r3);
        gen_msubs_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3],
                    tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_UL);
        break;
    case OPC2_32_RRR1_MSUBS_H_UU:
        CHECK_REG_PAIR(r4);
        CHECK_REG_PAIR(r3);
        gen_msubs_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3],
                    tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_UU);
        break;
    /* 64-bit-result variants */
    case OPC2_32_RRR1_MSUBM_H_LL:
        CHECK_REG_PAIR(r4);
        CHECK_REG_PAIR(r3);
        gen_msubm_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3],
                    tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_LL);
        break;
    case OPC2_32_RRR1_MSUBM_H_LU:
        CHECK_REG_PAIR(r4);
        CHECK_REG_PAIR(r3);
        gen_msubm_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3],
                    tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_LU);
        break;
    case OPC2_32_RRR1_MSUBM_H_UL:
        CHECK_REG_PAIR(r4);
        CHECK_REG_PAIR(r3);
        gen_msubm_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3],
                    tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_UL);
        break;
    case OPC2_32_RRR1_MSUBM_H_UU:
        CHECK_REG_PAIR(r4);
        CHECK_REG_PAIR(r3);
        gen_msubm_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3],
                    tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_UU);
        break;
    case OPC2_32_RRR1_MSUBMS_H_LL:
        CHECK_REG_PAIR(r4);
        CHECK_REG_PAIR(r3);
        gen_msubms_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3],
                     tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_LL);
        break;
    case OPC2_32_RRR1_MSUBMS_H_LU:
        CHECK_REG_PAIR(r4);
        CHECK_REG_PAIR(r3);
        gen_msubms_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3],
                     tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_LU);
        break;
    case OPC2_32_RRR1_MSUBMS_H_UL:
        CHECK_REG_PAIR(r4);
        CHECK_REG_PAIR(r3);
        gen_msubms_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3],
                     tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_UL);
        break;
    case OPC2_32_RRR1_MSUBMS_H_UU:
        CHECK_REG_PAIR(r4);
        CHECK_REG_PAIR(r3);
        gen_msubms_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3],
                     tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_UU);
        break;
    /* rounded 32-bit-result variants: single source/dest registers */
    case OPC2_32_RRR1_MSUBR_H_LL:
        gen_msubr32_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1],
                      tcg_ctx->cpu_gpr_d[r2], n, MODE_LL);
        break;
    case OPC2_32_RRR1_MSUBR_H_LU:
        gen_msubr32_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1],
                      tcg_ctx->cpu_gpr_d[r2], n, MODE_LU);
        break;
    case OPC2_32_RRR1_MSUBR_H_UL:
        gen_msubr32_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1],
                      tcg_ctx->cpu_gpr_d[r2], n, MODE_UL);
        break;
    case OPC2_32_RRR1_MSUBR_H_UU:
        gen_msubr32_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1],
                      tcg_ctx->cpu_gpr_d[r2], n, MODE_UU);
        break;
    case OPC2_32_RRR1_MSUBRS_H_LL:
        gen_msubr32s_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1],
                       tcg_ctx->cpu_gpr_d[r2], n, MODE_LL);
        break;
    case OPC2_32_RRR1_MSUBRS_H_LU:
        gen_msubr32s_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1],
                       tcg_ctx->cpu_gpr_d[r2], n, MODE_LU);
        break;
    case OPC2_32_RRR1_MSUBRS_H_UL:
        gen_msubr32s_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1],
                       tcg_ctx->cpu_gpr_d[r2], n, MODE_UL);
        break;
    case OPC2_32_RRR1_MSUBRS_H_UU:
        gen_msubr32s_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1],
                       tcg_ctx->cpu_gpr_d[r2], n, MODE_UU);
        break;
    default:
        /* unknown sub-opcode: emit an illegal-opcode trap */
        generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
    }
}

/*
 * Decode the RRR1-format MSUB.Q instruction group: Q-format (fixed point)
 * multiply-subtract.  Note that temp is created holding n but is then
 * reused by the _L/_U/_LL/_UU variants as the sign-extended (low) or
 * arithmetically-shifted (high) halfword operand before the gen_* call,
 * so the emission order within each case matters.
 */
static void decode_rrr1_msubq_h(DisasContext *ctx)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;

    uint32_t op2;
    uint32_t r1, r2, r3, r4, n;
    TCGv temp, temp2;

    op2 = MASK_OP_RRR1_OP2(ctx->opcode);
    r1 = MASK_OP_RRR1_S1(ctx->opcode);
    r2 = MASK_OP_RRR1_S2(ctx->opcode);
    r3 = MASK_OP_RRR1_S3(ctx->opcode);
    r4 = MASK_OP_RRR1_D(ctx->opcode);
    n = MASK_OP_RRR1_N(ctx->opcode);

    temp = tcg_const_i32(tcg_ctx, n);
    temp2 = tcg_temp_new(tcg_ctx);

    switch (op2) {
    case OPC2_32_RRR1_MSUB_Q_32:
        gen_msub32_q(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1],
                     tcg_ctx->cpu_gpr_d[r2], n, 32);
        break;
    case OPC2_32_RRR1_MSUB_Q_64:
        CHECK_REG_PAIR(r4);
        CHECK_REG_PAIR(r3);
        gen_msub64_q(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3],
                     tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2],
                     n);
        break;
    case OPC2_32_RRR1_MSUB_Q_32_L:
        tcg_gen_ext16s_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r2]);
        gen_msub32_q(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1],
                     temp, n, 16);
        break;
    case OPC2_32_RRR1_MSUB_Q_64_L:
        CHECK_REG_PAIR(r4);
        CHECK_REG_PAIR(r3);
        tcg_gen_ext16s_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r2]);
        gen_msub64_q(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3],
                     tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], temp,
                     n);
        break;
    case OPC2_32_RRR1_MSUB_Q_32_U:
        tcg_gen_sari_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r2], 16);
        gen_msub32_q(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1],
                     temp, n, 16);
        break;
    case OPC2_32_RRR1_MSUB_Q_64_U:
        CHECK_REG_PAIR(r4);
        CHECK_REG_PAIR(r3);
        tcg_gen_sari_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r2], 16);
        gen_msub64_q(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3],
                     tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], temp,
                     n);
        break;
    case OPC2_32_RRR1_MSUB_Q_32_LL:
        tcg_gen_ext16s_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r1]);
        tcg_gen_ext16s_tl(tcg_ctx, temp2, tcg_ctx->cpu_gpr_d[r2]);
        gen_m16sub32_q(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], temp, temp2, n);
        break;
    case OPC2_32_RRR1_MSUB_Q_64_LL:
        CHECK_REG_PAIR(r4);
        CHECK_REG_PAIR(r3);
        tcg_gen_ext16s_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r1]);
        tcg_gen_ext16s_tl(tcg_ctx, temp2, tcg_ctx->cpu_gpr_d[r2]);
        gen_m16sub64_q(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3],
                       tcg_ctx->cpu_gpr_d[r3+1], temp, temp2, n);
        break;
    case OPC2_32_RRR1_MSUB_Q_32_UU:
        tcg_gen_sari_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r1], 16);
        tcg_gen_sari_tl(tcg_ctx, temp2, tcg_ctx->cpu_gpr_d[r2], 16);
        gen_m16sub32_q(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], temp, temp2, n);
        break;
    case OPC2_32_RRR1_MSUB_Q_64_UU:
        CHECK_REG_PAIR(r4);
        CHECK_REG_PAIR(r3);
        tcg_gen_sari_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r1], 16);
        tcg_gen_sari_tl(tcg_ctx, temp2, tcg_ctx->cpu_gpr_d[r2], 16);
        gen_m16sub64_q(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3],
                       tcg_ctx->cpu_gpr_d[r3+1], temp, temp2, n);
        break;
    /* saturating variants */
    case OPC2_32_RRR1_MSUBS_Q_32:
        gen_msubs32_q(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1],
                      tcg_ctx->cpu_gpr_d[r2], n, 32);
        break;
    case OPC2_32_RRR1_MSUBS_Q_64:
        CHECK_REG_PAIR(r4);
        CHECK_REG_PAIR(r3);
        gen_msubs64_q(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3],
                      tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2],
                      n);
        break;
    case OPC2_32_RRR1_MSUBS_Q_32_L:
        tcg_gen_ext16s_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r2]);
        gen_msubs32_q(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1],
                      temp, n, 16);
        break;
    case OPC2_32_RRR1_MSUBS_Q_64_L:
        CHECK_REG_PAIR(r4);
        CHECK_REG_PAIR(r3);
        tcg_gen_ext16s_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r2]);
        gen_msubs64_q(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3],
                      tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], temp,
                      n);
        break;
    case OPC2_32_RRR1_MSUBS_Q_32_U:
        tcg_gen_sari_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r2], 16);
        gen_msubs32_q(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1],
                      temp, n, 16);
        break;
    case OPC2_32_RRR1_MSUBS_Q_64_U:
        CHECK_REG_PAIR(r4);
        CHECK_REG_PAIR(r3);
        tcg_gen_sari_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r2], 16);
        gen_msubs64_q(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3],
                      tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], temp,
                      n);
        break;
    case OPC2_32_RRR1_MSUBS_Q_32_LL:
        tcg_gen_ext16s_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r1]);
        tcg_gen_ext16s_tl(tcg_ctx, temp2, tcg_ctx->cpu_gpr_d[r2]);
        gen_m16subs32_q(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], temp, temp2, n);
        break;
    case OPC2_32_RRR1_MSUBS_Q_64_LL:
        CHECK_REG_PAIR(r4);
        CHECK_REG_PAIR(r3);
        tcg_gen_ext16s_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r1]);
        tcg_gen_ext16s_tl(tcg_ctx, temp2, tcg_ctx->cpu_gpr_d[r2]);
        gen_m16subs64_q(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3],
                        tcg_ctx->cpu_gpr_d[r3+1], temp, temp2, n);
        break;
    case OPC2_32_RRR1_MSUBS_Q_32_UU:
        tcg_gen_sari_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r1], 16);
        tcg_gen_sari_tl(tcg_ctx, temp2, tcg_ctx->cpu_gpr_d[r2], 16);
        gen_m16subs32_q(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], temp, temp2, n);
        break;
    case OPC2_32_RRR1_MSUBS_Q_64_UU:
        CHECK_REG_PAIR(r4);
        CHECK_REG_PAIR(r3);
        tcg_gen_sari_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r1], 16);
        tcg_gen_sari_tl(tcg_ctx, temp2, tcg_ctx->cpu_gpr_d[r2], 16);
        gen_m16subs64_q(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3],
                        tcg_ctx->cpu_gpr_d[r3+1], temp, temp2, n);
        break;
    case OPC2_32_RRR1_MSUBR_H_64_UL:
        CHECK_REG_PAIR(r3);
        gen_msubr64_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1],
                      tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, 2);
        break;
    case OPC2_32_RRR1_MSUBRS_H_64_UL:
        CHECK_REG_PAIR(r3);
        gen_msubr64s_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3+1],
                       tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, 2);
        break;
    case OPC2_32_RRR1_MSUBR_Q_32_LL:
        tcg_gen_ext16s_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r1]);
        tcg_gen_ext16s_tl(tcg_ctx, temp2, tcg_ctx->cpu_gpr_d[r2]);
        gen_msubr_q(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], temp, temp2, n);
        break;
    case OPC2_32_RRR1_MSUBR_Q_32_UU:
        tcg_gen_sari_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r1], 16);
        tcg_gen_sari_tl(tcg_ctx, temp2, tcg_ctx->cpu_gpr_d[r2], 16);
        gen_msubr_q(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], temp, temp2, n);
        break;
    case OPC2_32_RRR1_MSUBRS_Q_32_LL:
        tcg_gen_ext16s_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r1]);
        tcg_gen_ext16s_tl(tcg_ctx, temp2, tcg_ctx->cpu_gpr_d[r2]);
        gen_msubrs_q(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], temp, temp2, n);
        break;
    case OPC2_32_RRR1_MSUBRS_Q_32_UU:
        tcg_gen_sari_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r1], 16);
        tcg_gen_sari_tl(tcg_ctx, temp2, tcg_ctx->cpu_gpr_d[r2], 16);
        gen_msubrs_q(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], temp, temp2, n);
        break;
    default:
        /* unknown sub-opcode: emit an illegal-opcode trap */
        generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
    }
    tcg_temp_free(tcg_ctx, temp);
    tcg_temp_free(tcg_ctx, temp2);
}

/*
 * Decode the RRR1-format MSUBAD.H instruction group: packed-halfword
 * multiply where one product is added and the other subtracted
 * (mirror of MADDSU.H but with the accumulator roles swapped).
 */
static void decode_rrr1_msubad_h(DisasContext *ctx)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;

    uint32_t op2;
    uint32_t r1, r2, r3, r4, n;

    op2 = MASK_OP_RRR1_OP2(ctx->opcode);
    r1 = MASK_OP_RRR1_S1(ctx->opcode);
    r2 =
MASK_OP_RRR1_S2(ctx->opcode);
    r3 = MASK_OP_RRR1_S3(ctx->opcode);
    r4 = MASK_OP_RRR1_D(ctx->opcode);
    n = MASK_OP_RRR1_N(ctx->opcode);

    switch (op2) {
    case OPC2_32_RRR1_MSUBAD_H_32_LL:
        CHECK_REG_PAIR(r4);
        CHECK_REG_PAIR(r3);
        gen_msubad_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3],
                     tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_LL);
        break;
    case OPC2_32_RRR1_MSUBAD_H_32_LU:
        CHECK_REG_PAIR(r4);
        CHECK_REG_PAIR(r3);
        gen_msubad_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3],
                     tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_LU);
        break;
    case OPC2_32_RRR1_MSUBAD_H_32_UL:
        CHECK_REG_PAIR(r4);
        CHECK_REG_PAIR(r3);
        gen_msubad_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3],
                     tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_UL);
        break;
    case OPC2_32_RRR1_MSUBAD_H_32_UU:
        CHECK_REG_PAIR(r4);
        CHECK_REG_PAIR(r3);
        gen_msubad_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3],
                     tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], n, MODE_UU);
        break;
    /* saturating variants */
    case OPC2_32_RRR1_MSUBADS_H_32_LL:
        CHECK_REG_PAIR(r4);
        CHECK_REG_PAIR(r3);
        gen_msubads_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3],
                      tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2],
                      n, MODE_LL);
        break;
    case OPC2_32_RRR1_MSUBADS_H_32_LU:
        CHECK_REG_PAIR(r4);
        CHECK_REG_PAIR(r3);
        gen_msubads_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3],
                      tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2],
                      n, MODE_LU);
        break;
    case OPC2_32_RRR1_MSUBADS_H_32_UL:
        CHECK_REG_PAIR(r4);
        CHECK_REG_PAIR(r3);
        gen_msubads_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3],
                      tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2],
                      n, MODE_UL);
        break;
    case OPC2_32_RRR1_MSUBADS_H_32_UU:
        CHECK_REG_PAIR(r4);
        CHECK_REG_PAIR(r3);
        gen_msubads_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3],
                      tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2],
                      n, MODE_UU);
        break;
    /* 64-bit-result variants */
    case OPC2_32_RRR1_MSUBADM_H_64_LL:
        CHECK_REG_PAIR(r4);
        CHECK_REG_PAIR(r3);
        gen_msubadm_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3],
                      tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2],
                      n, MODE_LL);
        break;
    case OPC2_32_RRR1_MSUBADM_H_64_LU:
        CHECK_REG_PAIR(r4);
        CHECK_REG_PAIR(r3);
        gen_msubadm_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3],
                      tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2],
                      n, MODE_LU);
        break;
    case OPC2_32_RRR1_MSUBADM_H_64_UL:
        CHECK_REG_PAIR(r4);
        CHECK_REG_PAIR(r3);
        gen_msubadm_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3],
                      tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2],
                      n, MODE_UL);
        break;
    case OPC2_32_RRR1_MSUBADM_H_64_UU:
        CHECK_REG_PAIR(r4);
        CHECK_REG_PAIR(r3);
        gen_msubadm_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3],
                      tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2],
                      n, MODE_UU);
        break;
    case OPC2_32_RRR1_MSUBADMS_H_64_LL:
        CHECK_REG_PAIR(r4);
        CHECK_REG_PAIR(r3);
        gen_msubadms_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3],
                       tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2],
                       n, MODE_LL);
        break;
    case OPC2_32_RRR1_MSUBADMS_H_64_LU:
        CHECK_REG_PAIR(r4);
        CHECK_REG_PAIR(r3);
        gen_msubadms_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3],
                       tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2],
                       n, MODE_LU);
        break;
    case OPC2_32_RRR1_MSUBADMS_H_64_UL:
        CHECK_REG_PAIR(r4);
        CHECK_REG_PAIR(r3);
        gen_msubadms_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3],
                       tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2],
                       n, MODE_UL);
        break;
    case OPC2_32_RRR1_MSUBADMS_H_64_UU:
        CHECK_REG_PAIR(r4);
        CHECK_REG_PAIR(r3);
        gen_msubadms_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4+1], tcg_ctx->cpu_gpr_d[r3],
                       tcg_ctx->cpu_gpr_d[r3+1], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2],
                       n, MODE_UU);
        break;
    /* rounded 32-bit-result variants: single source/dest registers */
    case OPC2_32_RRR1_MSUBADR_H_16_LL:
        gen_msubadr32_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1],
                        tcg_ctx->cpu_gpr_d[r2], n, MODE_LL);
        break;
    case OPC2_32_RRR1_MSUBADR_H_16_LU:
        gen_msubadr32_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1],
                        tcg_ctx->cpu_gpr_d[r2], n, MODE_LU);
        break;
    case OPC2_32_RRR1_MSUBADR_H_16_UL:
        gen_msubadr32_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1],
                        tcg_ctx->cpu_gpr_d[r2], n, MODE_UL);
        break;
    case OPC2_32_RRR1_MSUBADR_H_16_UU:
        gen_msubadr32_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1],
                        tcg_ctx->cpu_gpr_d[r2], n, MODE_UU);
        break;
    case OPC2_32_RRR1_MSUBADRS_H_16_LL:
        gen_msubadr32s_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1],
                         tcg_ctx->cpu_gpr_d[r2], n, MODE_LL);
        break;
    case OPC2_32_RRR1_MSUBADRS_H_16_LU:
        gen_msubadr32s_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1],
                         tcg_ctx->cpu_gpr_d[r2], n, MODE_LU);
        break;
    case OPC2_32_RRR1_MSUBADRS_H_16_UL:
        gen_msubadr32s_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1],
                         tcg_ctx->cpu_gpr_d[r2], n, MODE_UL);
        break;
    case OPC2_32_RRR1_MSUBADRS_H_16_UU:
        gen_msubadr32s_h(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1],
                         tcg_ctx->cpu_gpr_d[r2], n, MODE_UU);
        break;
    default:
        /* unknown sub-opcode: emit an illegal-opcode trap */
        generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
    }
}

/*
 * RRRR format: DEXTR/EXTR/EXTR.U/INSERT with the position and width
 * taken from registers (E[r3] pair for EXTR/INSERT, D[r3] for DEXTR).
 * Note tmp_pos/tmp_width are reused across the shift sequence in each
 * case, so the statement order is significant.
 */
static void decode_rrrr_extract_insert(DisasContext *ctx)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;

    uint32_t op2;
    int r1, r2, r3, r4;
    TCGv tmp_width, tmp_pos;

    r1 = MASK_OP_RRRR_S1(ctx->opcode);
    r2 = MASK_OP_RRRR_S2(ctx->opcode);
    r3 = MASK_OP_RRRR_S3(ctx->opcode);
    r4 = MASK_OP_RRRR_D(ctx->opcode);
    op2 = MASK_OP_RRRR_OP2(ctx->opcode);

    tmp_pos = tcg_temp_new(tcg_ctx);
    tmp_width = tcg_temp_new(tcg_ctx);

    switch (op2) {
    case OPC2_32_RRRR_DEXTR:
        tcg_gen_andi_tl(tcg_ctx, tmp_pos, tcg_ctx->cpu_gpr_d[r3], 0x1f);
        if (r1 == r2) {
            /* same source register: the concatenated extract is a rotate */
            tcg_gen_rotl_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r1], tmp_pos);
        } else {
            tcg_gen_shl_tl(tcg_ctx, tmp_width, tcg_ctx->cpu_gpr_d[r1], tmp_pos);
            tcg_gen_subfi_tl(tcg_ctx, tmp_pos, 32, tmp_pos);
            tcg_gen_shr_tl(tcg_ctx, tmp_pos, tcg_ctx->cpu_gpr_d[r2], tmp_pos);
            tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r4], tmp_width, tmp_pos);
        }
        break;
    case OPC2_32_RRRR_EXTR:
    case OPC2_32_RRRR_EXTR_U:
        CHECK_REG_PAIR(r3);
        /* D[r3+1] holds the width, D[r3] the bit position */
        tcg_gen_andi_tl(tcg_ctx, tmp_width, tcg_ctx->cpu_gpr_d[r3+1], 0x1f);
        tcg_gen_andi_tl(tcg_ctx, tmp_pos, tcg_ctx->cpu_gpr_d[r3], 0x1f);
        tcg_gen_add_tl(tcg_ctx, tmp_pos, tmp_pos, tmp_width);
        tcg_gen_subfi_tl(tcg_ctx, tmp_pos, 32, tmp_pos);
        tcg_gen_shl_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r1], tmp_pos);
        tcg_gen_subfi_tl(tcg_ctx, tmp_width, 32, tmp_width);
        if (op2 == OPC2_32_RRRR_EXTR) {
            /* EXTR: arithmetic (sign-extending) right shift */
            tcg_gen_sar_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4], tmp_width);
        } else {
            /* EXTR.U: logical (zero-extending) right shift */
            tcg_gen_shr_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4], tmp_width);
        }
        break;
    case OPC2_32_RRRR_INSERT:
        CHECK_REG_PAIR(r3);
        tcg_gen_andi_tl(tcg_ctx, tmp_width, tcg_ctx->cpu_gpr_d[r3+1], 0x1f);
        tcg_gen_andi_tl(tcg_ctx, tmp_pos, tcg_ctx->cpu_gpr_d[r3], 0x1f);
        gen_insert(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], tmp_width,
                   tmp_pos);
        break;
    default:
        generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
    }
    tcg_temp_free(tcg_ctx, tmp_pos);
    tcg_temp_free(tcg_ctx, tmp_width);
}

/*
 * RRRW format: EXTR/EXTR.U/IMASK/INSERT with an immediate width field
 * from the opcode and the bit position taken from D[r3].
 */
static void decode_rrrw_extract_insert(DisasContext *ctx)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;

    uint32_t op2;
    int r1, r2, r3, r4;
    int32_t width;

    TCGv temp, temp2;

    op2 = MASK_OP_RRRW_OP2(ctx->opcode);
    r1 = MASK_OP_RRRW_S1(ctx->opcode);
    r2 = MASK_OP_RRRW_S2(ctx->opcode);
    r3 = MASK_OP_RRRW_S3(ctx->opcode);
    r4 = MASK_OP_RRRW_D(ctx->opcode);
    width = MASK_OP_RRRW_WIDTH(ctx->opcode);

    temp = tcg_temp_new(tcg_ctx);

    switch (op2) {
    case OPC2_32_RRRW_EXTR:
        tcg_gen_andi_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r3], 0x1f);
        tcg_gen_addi_tl(tcg_ctx, temp, temp, width);
        tcg_gen_subfi_tl(tcg_ctx, temp, 32, temp);
        tcg_gen_shl_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r1], temp);
        tcg_gen_sari_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4], 32 - width);
        break;
    case OPC2_32_RRRW_EXTR_U:
        if (width == 0) {
            /* zero-width extract yields 0; also avoids an UB 32-bit shift below */
            tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r4], 0);
        } else {
            tcg_gen_andi_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r3], 0x1f);
            tcg_gen_shr_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r1], temp);
            tcg_gen_andi_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r4], ~0u >> (32-width));
        }
        break;
    case OPC2_32_RRRW_IMASK:
        temp2 = tcg_temp_new(tcg_ctx);

        tcg_gen_andi_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r3], 0x1f);
        tcg_gen_movi_tl(tcg_ctx, temp2, (1 << width) - 1);
        tcg_gen_shl_tl(tcg_ctx, temp2, temp2, temp);
        tcg_gen_shl_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r2], temp);
        /* mask is computed into temp2 first, then stored to D[r4+1] */
        tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r4+1], temp2);

        tcg_temp_free(tcg_ctx, temp2);
        break;
    case OPC2_32_RRRW_INSERT:
        temp2 = tcg_temp_new(tcg_ctx);

        tcg_gen_movi_tl(tcg_ctx, temp, width);
        tcg_gen_andi_tl(tcg_ctx, temp2, tcg_ctx->cpu_gpr_d[r3], 0x1f);
        gen_insert(ctx, tcg_ctx->cpu_gpr_d[r4], tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r2], temp, temp2);

        tcg_temp_free(tcg_ctx, temp2);
        break;
    default:
        generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
    }
    tcg_temp_free(tcg_ctx, temp);
}

/*
 * SYS format: system instructions (interrupt enable/disable, returns,
 * context save/restore, traps).  Several cases (DEBUG, DSYNC, ISYNC,
 * NOP) intentionally emit nothing.
 */
static void decode_sys_interrupts(DisasContext *ctx)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;

    uint32_t op2;
    uint32_t r1;
    TCGLabel *l1;
    TCGv tmp;

    op2 = MASK_OP_SYS_OP2(ctx->opcode);
    r1 = MASK_OP_SYS_S1D(ctx->opcode);

    switch (op2) {
    case OPC2_32_SYS_DEBUG:
        /* raise EXCP_DEBUG */
        break;
    case OPC2_32_SYS_DISABLE:
        /* clear ICR.IE: globally disable interrupts */
        tcg_gen_andi_tl(tcg_ctx, tcg_ctx->cpu_ICR, tcg_ctx->cpu_ICR, ~MASK_ICR_IE_1_3);
        break;
    case OPC2_32_SYS_DSYNC:
        break;
    case OPC2_32_SYS_ENABLE:
        /* set ICR.IE: globally enable interrupts */
        tcg_gen_ori_tl(tcg_ctx, tcg_ctx->cpu_ICR, tcg_ctx->cpu_ICR, MASK_ICR_IE_1_3);
        break;
    case OPC2_32_SYS_ISYNC:
        break;
    case OPC2_32_SYS_NOP:
        break;
    case OPC2_32_SYS_RET:
        gen_compute_branch(ctx, op2, 0, 0, 0, 0);
        break;
    case OPC2_32_SYS_FRET:
        gen_fret(ctx);
        break;
    case OPC2_32_SYS_RFE:
        gen_helper_rfe(tcg_ctx, tcg_ctx->cpu_env);
        tcg_gen_exit_tb(tcg_ctx, NULL, 0);
        ctx->base.is_jmp = DISAS_NORETURN;
        break;
    case OPC2_32_SYS_RFM:
        /* RFM is only legal in supervisor mode */
        if ((ctx->hflags & TRICORE_HFLAG_KUU) == TRICORE_HFLAG_SM) {
            tmp = tcg_temp_new(tcg_ctx);
            l1 = gen_new_label(tcg_ctx);

            /* skip the helper unless DBGSR.DE is set */
            tcg_gen_ld32u_tl(tcg_ctx, tmp, tcg_ctx->cpu_env, offsetof(CPUTriCoreState, DBGSR));
            tcg_gen_andi_tl(tcg_ctx, tmp, tmp, MASK_DBGSR_DE);
            tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_NE, tmp, 1, l1);
            gen_helper_rfm(tcg_ctx, tcg_ctx->cpu_env);
            gen_set_label(tcg_ctx, l1);
            tcg_gen_exit_tb(tcg_ctx, NULL, 0);
            ctx->base.is_jmp = DISAS_NORETURN;
            tcg_temp_free(tcg_ctx, tmp);
        } else {
            /* generate privilege trap */
        }
        break;
    case OPC2_32_SYS_RSLCX:
        gen_helper_rslcx(tcg_ctx, tcg_ctx->cpu_env);
        break;
    case OPC2_32_SYS_SVLCX:
        gen_helper_svlcx(tcg_ctx, tcg_ctx->cpu_env);
        break;
    case
OPC2_32_SYS_RESTORE:
        /* RESTORE exists only from TriCore 1.6 on */
        if (has_feature(ctx, TRICORE_FEATURE_16)) {
            /* only supervisor mode or user mode 1 may restore ICR.IE */
            if ((ctx->hflags & TRICORE_HFLAG_KUU) == TRICORE_HFLAG_SM ||
                (ctx->hflags & TRICORE_HFLAG_KUU) == TRICORE_HFLAG_UM1) {
                /* copy bit 0 of D[r1] into ICR.IE (bit 8) */
                tcg_gen_deposit_tl(tcg_ctx, tcg_ctx->cpu_ICR, tcg_ctx->cpu_ICR, tcg_ctx->cpu_gpr_d[r1], 8, 1);
            } /* else raise privilege trap */
        } else {
            generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
        }
        break;
    case OPC2_32_SYS_TRAPSV:
        /* trap if PSW.SV (sticky overflow, kept sign-flagged) is set */
        l1 = gen_new_label(tcg_ctx);
        tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_GE, tcg_ctx->cpu_PSW_SV, 0, l1);
        generate_trap(ctx, TRAPC_ASSERT, TIN5_SOVF);
        gen_set_label(tcg_ctx, l1);
        break;
    case OPC2_32_SYS_TRAPV:
        /* trap if PSW.V (overflow) is set */
        l1 = gen_new_label(tcg_ctx);
        tcg_gen_brcondi_tl(tcg_ctx, TCG_COND_GE, tcg_ctx->cpu_PSW_V, 0, l1);
        generate_trap(ctx, TRAPC_ASSERT, TIN5_OVF);
        gen_set_label(tcg_ctx, l1);
        break;
    default:
        generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC);
    }
}

static void decode_32Bit_opc(DisasContext *ctx)
{
    TCGContext *tcg_ctx = ctx->uc->tcg_ctx;

    int op1;
    int32_t r1, r2, r3;
    int32_t address, const16;
    int8_t b, const4;
    int32_t bpos;
    TCGv temp, temp2, temp3;

    op1 = MASK_OP_MAJOR(ctx->opcode);

    /* handle JNZ.T opcode only being 7 bit long */
    if (unlikely((op1 & 0x7f) == OPCM_32_BRN_JTT)) {
        op1 = OPCM_32_BRN_JTT;
    }

    switch (op1) {
/* ABS-format */
    case OPCM_32_ABS_LDW:
        decode_abs_ldw(ctx);
        break;
    case OPCM_32_ABS_LDB:
        decode_abs_ldb(ctx);
        break;
    case OPCM_32_ABS_LDMST_SWAP:
        decode_abs_ldst_swap(ctx);
        break;
    case OPCM_32_ABS_LDST_CONTEXT:
        decode_abs_ldst_context(ctx);
        break;
    case OPCM_32_ABS_STORE:
        decode_abs_store(ctx);
        break;
    case OPCM_32_ABS_STOREB_H:
        decode_abs_storeb_h(ctx);
        break;
    case OPC1_32_ABS_STOREQ:
        address = MASK_OP_ABS_OFF18(ctx->opcode);
        r1 = MASK_OP_ABS_S1D(ctx->opcode);
        temp = tcg_const_i32(tcg_ctx, EA_ABS_FORMAT(address));
        temp2 = tcg_temp_new(tcg_ctx);

        /* ST.Q stores the upper halfword of D[r1] */
        tcg_gen_shri_tl(tcg_ctx, temp2, tcg_ctx->cpu_gpr_d[r1], 16);
        tcg_gen_qemu_st_tl(tcg_ctx,
temp2, temp, ctx->mem_idx, MO_LEUW); + + tcg_temp_free(tcg_ctx, temp2); + tcg_temp_free(tcg_ctx, temp); + break; + case OPC1_32_ABS_LD_Q: + address = MASK_OP_ABS_OFF18(ctx->opcode); + r1 = MASK_OP_ABS_S1D(ctx->opcode); + temp = tcg_const_i32(tcg_ctx, EA_ABS_FORMAT(address)); + + tcg_gen_qemu_ld_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LEUW); + tcg_gen_shli_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r1], tcg_ctx->cpu_gpr_d[r1], 16); + + tcg_temp_free(tcg_ctx, temp); + break; + case OPC1_32_ABS_LEA: + address = MASK_OP_ABS_OFF18(ctx->opcode); + r1 = MASK_OP_ABS_S1D(ctx->opcode); + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_gpr_a[r1], EA_ABS_FORMAT(address)); + break; +/* ABSB-format */ + case OPC1_32_ABSB_ST_T: + address = MASK_OP_ABS_OFF18(ctx->opcode); + b = MASK_OP_ABSB_B(ctx->opcode); + bpos = MASK_OP_ABSB_BPOS(ctx->opcode); + + temp = tcg_const_i32(tcg_ctx, EA_ABS_FORMAT(address)); + temp2 = tcg_temp_new(tcg_ctx); + + tcg_gen_qemu_ld_tl(tcg_ctx, temp2, temp, ctx->mem_idx, MO_UB); + tcg_gen_andi_tl(tcg_ctx, temp2, temp2, ~(0x1u << bpos)); + tcg_gen_ori_tl(tcg_ctx, temp2, temp2, (b << bpos)); + tcg_gen_qemu_st_tl(tcg_ctx, temp2, temp, ctx->mem_idx, MO_UB); + + tcg_temp_free(tcg_ctx, temp); + tcg_temp_free(tcg_ctx, temp2); + break; +/* B-format */ + case OPC1_32_B_CALL: + case OPC1_32_B_CALLA: + case OPC1_32_B_FCALL: + case OPC1_32_B_FCALLA: + case OPC1_32_B_J: + case OPC1_32_B_JA: + case OPC1_32_B_JL: + case OPC1_32_B_JLA: + address = MASK_OP_B_DISP24_SEXT(ctx->opcode); + gen_compute_branch(ctx, op1, 0, 0, 0, address); + break; +/* Bit-format */ + case OPCM_32_BIT_ANDACC: + decode_bit_andacc(ctx); + break; + case OPCM_32_BIT_LOGICAL_T1: + decode_bit_logical_t(ctx); + break; + case OPCM_32_BIT_INSERT: + decode_bit_insert(ctx); + break; + case OPCM_32_BIT_LOGICAL_T2: + decode_bit_logical_t2(ctx); + break; + case OPCM_32_BIT_ORAND: + decode_bit_orand(ctx); + break; + case OPCM_32_BIT_SH_LOGIC1: + decode_bit_sh_logic1(ctx); + break; + case OPCM_32_BIT_SH_LOGIC2: + 
decode_bit_sh_logic2(ctx); + break; + /* BO Format */ + case OPCM_32_BO_ADDRMODE_POST_PRE_BASE: + decode_bo_addrmode_post_pre_base(ctx); + break; + case OPCM_32_BO_ADDRMODE_BITREVERSE_CIRCULAR: + decode_bo_addrmode_bitreverse_circular(ctx); + break; + case OPCM_32_BO_ADDRMODE_LD_POST_PRE_BASE: + decode_bo_addrmode_ld_post_pre_base(ctx); + break; + case OPCM_32_BO_ADDRMODE_LD_BITREVERSE_CIRCULAR: + decode_bo_addrmode_ld_bitreverse_circular(ctx); + break; + case OPCM_32_BO_ADDRMODE_STCTX_POST_PRE_BASE: + decode_bo_addrmode_stctx_post_pre_base(ctx); + break; + case OPCM_32_BO_ADDRMODE_LDMST_BITREVERSE_CIRCULAR: + decode_bo_addrmode_ldmst_bitreverse_circular(ctx); + break; +/* BOL-format */ + case OPC1_32_BOL_LD_A_LONGOFF: + case OPC1_32_BOL_LD_W_LONGOFF: + case OPC1_32_BOL_LEA_LONGOFF: + case OPC1_32_BOL_ST_W_LONGOFF: + case OPC1_32_BOL_ST_A_LONGOFF: + case OPC1_32_BOL_LD_B_LONGOFF: + case OPC1_32_BOL_LD_BU_LONGOFF: + case OPC1_32_BOL_LD_H_LONGOFF: + case OPC1_32_BOL_LD_HU_LONGOFF: + case OPC1_32_BOL_ST_B_LONGOFF: + case OPC1_32_BOL_ST_H_LONGOFF: + decode_bol_opc(ctx, op1); + break; +/* BRC Format */ + case OPCM_32_BRC_EQ_NEQ: + case OPCM_32_BRC_GE: + case OPCM_32_BRC_JLT: + case OPCM_32_BRC_JNE: + const4 = MASK_OP_BRC_CONST4_SEXT(ctx->opcode); + address = MASK_OP_BRC_DISP15_SEXT(ctx->opcode); + r1 = MASK_OP_BRC_S1(ctx->opcode); + gen_compute_branch(ctx, op1, r1, 0, const4, address); + break; +/* BRN Format */ + case OPCM_32_BRN_JTT: + address = MASK_OP_BRN_DISP15_SEXT(ctx->opcode); + r1 = MASK_OP_BRN_S1(ctx->opcode); + gen_compute_branch(ctx, op1, r1, 0, 0, address); + break; +/* BRR Format */ + case OPCM_32_BRR_EQ_NEQ: + case OPCM_32_BRR_ADDR_EQ_NEQ: + case OPCM_32_BRR_GE: + case OPCM_32_BRR_JLT: + case OPCM_32_BRR_JNE: + case OPCM_32_BRR_JNZ: + case OPCM_32_BRR_LOOP: + address = MASK_OP_BRR_DISP15_SEXT(ctx->opcode); + r2 = MASK_OP_BRR_S2(ctx->opcode); + r1 = MASK_OP_BRR_S1(ctx->opcode); + gen_compute_branch(ctx, op1, r1, r2, 0, address); + break; +/* RC Format */ + 
case OPCM_32_RC_LOGICAL_SHIFT: + decode_rc_logical_shift(ctx); + break; + case OPCM_32_RC_ACCUMULATOR: + decode_rc_accumulator(ctx); + break; + case OPCM_32_RC_SERVICEROUTINE: + decode_rc_serviceroutine(ctx); + break; + case OPCM_32_RC_MUL: + decode_rc_mul(ctx); + break; +/* RCPW Format */ + case OPCM_32_RCPW_MASK_INSERT: + decode_rcpw_insert(ctx); + break; +/* RCRR Format */ + case OPC1_32_RCRR_INSERT: + r1 = MASK_OP_RCRR_S1(ctx->opcode); + r2 = MASK_OP_RCRR_S3(ctx->opcode); + r3 = MASK_OP_RCRR_D(ctx->opcode); + const16 = MASK_OP_RCRR_CONST4(ctx->opcode); + temp = tcg_const_i32(tcg_ctx, const16); + temp2 = tcg_temp_new(tcg_ctx); /* width*/ + temp3 = tcg_temp_new(tcg_ctx); /* pos */ + + CHECK_REG_PAIR(r3); + + tcg_gen_andi_tl(tcg_ctx, temp2, tcg_ctx->cpu_gpr_d[r3+1], 0x1f); + tcg_gen_andi_tl(tcg_ctx, temp3, tcg_ctx->cpu_gpr_d[r3], 0x1f); + + gen_insert(ctx, tcg_ctx->cpu_gpr_d[r2], tcg_ctx->cpu_gpr_d[r1], temp, temp2, temp3); + + tcg_temp_free(tcg_ctx, temp); + tcg_temp_free(tcg_ctx, temp2); + tcg_temp_free(tcg_ctx, temp3); + break; +/* RCRW Format */ + case OPCM_32_RCRW_MASK_INSERT: + decode_rcrw_insert(ctx); + break; +/* RCR Format */ + case OPCM_32_RCR_COND_SELECT: + decode_rcr_cond_select(ctx); + break; + case OPCM_32_RCR_MADD: + decode_rcr_madd(ctx); + break; + case OPCM_32_RCR_MSUB: + decode_rcr_msub(ctx); + break; +/* RLC Format */ + case OPC1_32_RLC_ADDI: + case OPC1_32_RLC_ADDIH: + case OPC1_32_RLC_ADDIH_A: + case OPC1_32_RLC_MFCR: + case OPC1_32_RLC_MOV: + case OPC1_32_RLC_MOV_64: + case OPC1_32_RLC_MOV_U: + case OPC1_32_RLC_MOV_H: + case OPC1_32_RLC_MOVH_A: + case OPC1_32_RLC_MTCR: + decode_rlc_opc(ctx, op1); + break; +/* RR Format */ + case OPCM_32_RR_ACCUMULATOR: + decode_rr_accumulator(ctx); + break; + case OPCM_32_RR_LOGICAL_SHIFT: + decode_rr_logical_shift(ctx); + break; + case OPCM_32_RR_ADDRESS: + decode_rr_address(ctx); + break; + case OPCM_32_RR_IDIRECT: + decode_rr_idirect(ctx); + break; + case OPCM_32_RR_DIVIDE: + decode_rr_divide(ctx); + 
break; +/* RR1 Format */ + case OPCM_32_RR1_MUL: + decode_rr1_mul(ctx); + break; + case OPCM_32_RR1_MULQ: + decode_rr1_mulq(ctx); + break; +/* RR2 format */ + case OPCM_32_RR2_MUL: + decode_rr2_mul(ctx); + break; +/* RRPW format */ + case OPCM_32_RRPW_EXTRACT_INSERT: + decode_rrpw_extract_insert(ctx); + break; + case OPC1_32_RRPW_DEXTR: + r1 = MASK_OP_RRPW_S1(ctx->opcode); + r2 = MASK_OP_RRPW_S2(ctx->opcode); + r3 = MASK_OP_RRPW_D(ctx->opcode); + const16 = MASK_OP_RRPW_POS(ctx->opcode); + if (r1 == r2) { + tcg_gen_rotli_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r1], const16); + } else { + temp = tcg_temp_new(tcg_ctx); + tcg_gen_shli_tl(tcg_ctx, temp, tcg_ctx->cpu_gpr_d[r1], const16); + tcg_gen_shri_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r2], 32 - const16); + tcg_gen_or_tl(tcg_ctx, tcg_ctx->cpu_gpr_d[r3], tcg_ctx->cpu_gpr_d[r3], temp); + tcg_temp_free(tcg_ctx, temp); + } + break; +/* RRR Format */ + case OPCM_32_RRR_COND_SELECT: + decode_rrr_cond_select(ctx); + break; + case OPCM_32_RRR_DIVIDE: + decode_rrr_divide(ctx); + break; +/* RRR2 Format */ + case OPCM_32_RRR2_MADD: + decode_rrr2_madd(ctx); + break; + case OPCM_32_RRR2_MSUB: + decode_rrr2_msub(ctx); + break; +/* RRR1 format */ + case OPCM_32_RRR1_MADD: + decode_rrr1_madd(ctx); + break; + case OPCM_32_RRR1_MADDQ_H: + decode_rrr1_maddq_h(ctx); + break; + case OPCM_32_RRR1_MADDSU_H: + decode_rrr1_maddsu_h(ctx); + break; + case OPCM_32_RRR1_MSUB_H: + decode_rrr1_msub(ctx); + break; + case OPCM_32_RRR1_MSUB_Q: + decode_rrr1_msubq_h(ctx); + break; + case OPCM_32_RRR1_MSUBAD_H: + decode_rrr1_msubad_h(ctx); + break; +/* RRRR format */ + case OPCM_32_RRRR_EXTRACT_INSERT: + decode_rrrr_extract_insert(ctx); + break; +/* RRRW format */ + case OPCM_32_RRRW_EXTRACT_INSERT: + decode_rrrw_extract_insert(ctx); + break; +/* SYS format */ + case OPCM_32_SYS_INTERRUPTS: + decode_sys_interrupts(ctx); + break; + case OPC1_32_SYS_RSTV: + tcg_gen_movi_tl(tcg_ctx, tcg_ctx->cpu_PSW_V, 0); + 
tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_PSW_SV, tcg_ctx->cpu_PSW_V); + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_PSW_AV, tcg_ctx->cpu_PSW_V); + tcg_gen_mov_tl(tcg_ctx, tcg_ctx->cpu_PSW_SAV, tcg_ctx->cpu_PSW_V); + break; + default: + generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); + } +} + +static bool tricore_insn_is_16bit(uint32_t insn) +{ + return (insn & 0x1) == 0; +} + +static void tricore_tr_init_disas_context(DisasContextBase *dcbase, + CPUState *cs) +{ + DisasContext *ctx = container_of(dcbase, DisasContext, base); + CPUTriCoreState *env = cs->env_ptr; + + // unicorn setup + ctx->uc = cs->uc; + + ctx->mem_idx = cpu_mmu_index(env, false); + ctx->hflags = (uint32_t)ctx->base.tb->flags; + ctx->features = env->features; +} + +static void tricore_tr_tb_start(DisasContextBase *db, CPUState *cpu) +{ +} + +static void tricore_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu) +{ + DisasContext *ctx = container_of(dcbase, DisasContext, base); + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + tcg_gen_insn_start(tcg_ctx, ctx->base.pc_next); +} + +static bool insn_crosses_page(CPUTriCoreState *env, DisasContext *ctx) +{ + /* + * Return true if the insn at ctx->base.pc_next might cross a page boundary. + * (False positives are OK, false negatives are not.) + * Our caller ensures we are only called if dc->base.pc_next is less than + * 4 bytes from the page boundary, so we cross the page if the first + * 16 bits indicate that this is a 32 bit insn. 
+ */ + uint16_t insn = cpu_lduw_code(env, ctx->base.pc_next); + + return !tricore_insn_is_16bit(insn); +} + + +static void tricore_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu) +{ + DisasContext *ctx = container_of(dcbase, DisasContext, base); + struct uc_struct *uc = ctx->uc; + TCGContext *tcg_ctx = ctx->uc->tcg_ctx; + CPUTriCoreState *env = cpu->env_ptr; + uint16_t insn_lo; + bool is_16bit; + uint32_t insn_size; + + // Unicorn: end address tells us to stop emulation + if (uc_addr_is_exit(uc, ctx->base.pc_next)) { + gen_helper_rfe(tcg_ctx, tcg_ctx->cpu_env); + tcg_gen_exit_tb(tcg_ctx, NULL, 0); + cpu->exception_index = EXCP_HLT; + cpu->halted = 1; + ctx->base.is_jmp = DISAS_NORETURN; + } else { + insn_lo = cpu_lduw_code(env, ctx->base.pc_next); + is_16bit = tricore_insn_is_16bit(insn_lo); + + insn_size = is_16bit ? 2 : 4; + // Unicorn: trace this instruction on request + if (HOOK_EXISTS_BOUNDED(ctx->uc, UC_HOOK_CODE, ctx->base.pc_next)) { + gen_uc_tracecode(tcg_ctx, insn_size, UC_HOOK_CODE_IDX, ctx->uc, + ctx->base.pc_next); + // the callback might want to stop emulation immediately + check_exit_request(tcg_ctx); + } + + if (is_16bit) { + ctx->opcode = insn_lo; + ctx->pc_succ_insn = ctx->base.pc_next + 2; + decode_16Bit_opc(ctx); + } else { + uint32_t insn_hi = cpu_lduw_code(env, ctx->base.pc_next + 2); + ctx->opcode = insn_hi << 16 | insn_lo; + ctx->pc_succ_insn = ctx->base.pc_next + 4; + decode_32Bit_opc(ctx); + } + ctx->base.pc_next = ctx->pc_succ_insn; + + if (ctx->base.is_jmp == DISAS_NEXT) { + target_ulong page_start; + + page_start = ctx->base.pc_first & TARGET_PAGE_MASK; + if (ctx->base.pc_next - page_start >= TARGET_PAGE_SIZE + || (ctx->base.pc_next - page_start >= TARGET_PAGE_SIZE - 3 + && insn_crosses_page(env, ctx))) { + ctx->base.is_jmp = DISAS_TOO_MANY; + } + } + } +} + +static void tricore_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu) +{ + DisasContext *ctx = container_of(dcbase, DisasContext, base); + + // if 
(ctx->base.singlestep_enabled && ctx->base.is_jmp != DISAS_NORETURN) { + // save_cpu_state(ctx, ctx->base.is_jmp != DISAS_EXIT); + // gen_helper_raise_exception_debug(tcg_ctx, tcg_ctx->cpu_env); + // } else { + switch (ctx->base.is_jmp) { + case DISAS_TOO_MANY: + gen_goto_tb(ctx, 0, ctx->base.pc_next); + break; + case DISAS_NORETURN: + break; + default: + g_assert_not_reached(); + } +} + +static const TranslatorOps tricore_tr_ops = { + .init_disas_context = tricore_tr_init_disas_context, + .tb_start = tricore_tr_tb_start, + .insn_start = tricore_tr_insn_start, + .translate_insn = tricore_tr_translate_insn, + .tb_stop = tricore_tr_tb_stop, +}; + + +void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns) +{ + DisasContext ctx; + memset(&ctx, 0, sizeof(ctx)); + translator_loop(&tricore_tr_ops, &ctx.base, cs, tb, max_insns); +} + +void +restore_state_to_opc(CPUTriCoreState *env, TranslationBlock *tb, + target_ulong *data) +{ + env->PC = data[0]; +} + +/* + * + * Initialization + * + */ +void cpu_state_reset(CPUTriCoreState *env) +{ + /* Reset Regs to Default Value */ + env->PSW = 0xb80; + fpu_set_state(env); +} + +static void tricore_tcg_init_csfr(struct uc_struct *uc) +{ + TCGContext *tcg_ctx = uc->tcg_ctx; + + tcg_ctx->cpu_PCXI = tcg_global_mem_new(tcg_ctx, tcg_ctx->cpu_env, + offsetof(CPUTriCoreState, PCXI), "PCXI"); + tcg_ctx->cpu_PSW = tcg_global_mem_new(tcg_ctx, tcg_ctx->cpu_env, + offsetof(CPUTriCoreState, PSW), "PSW"); + tcg_ctx->cpu_PC = tcg_global_mem_new(tcg_ctx, tcg_ctx->cpu_env, + offsetof(CPUTriCoreState, PC), "PC"); + tcg_ctx->cpu_ICR = tcg_global_mem_new(tcg_ctx, tcg_ctx->cpu_env, + offsetof(CPUTriCoreState, ICR), "ICR"); +} + +void tricore_tcg_init(struct uc_struct *uc) +{ + TCGContext *tcg_ctx = uc->tcg_ctx; + int i; + + /* reg init */ + for (i = 0 ; i < 16 ; i++) { + tcg_ctx->cpu_gpr_a[i] = tcg_global_mem_new(uc->tcg_ctx, tcg_ctx->cpu_env, + offsetof(CPUTriCoreState, gpr_a[i]), + regnames_a[i]); + } + for (i = 0 ; i < 16 ; i++) 
{ + tcg_ctx->cpu_gpr_d[i] = tcg_global_mem_new(uc->tcg_ctx, tcg_ctx->cpu_env, + offsetof(CPUTriCoreState, gpr_d[i]), + regnames_d[i]); + } + tricore_tcg_init_csfr(uc); + /* init PSW flag cache */ + tcg_ctx->cpu_PSW_C = tcg_global_mem_new(uc->tcg_ctx, tcg_ctx->cpu_env, + offsetof(CPUTriCoreState, PSW_USB_C), + "PSW_C"); + tcg_ctx->cpu_PSW_V = tcg_global_mem_new(uc->tcg_ctx, tcg_ctx->cpu_env, + offsetof(CPUTriCoreState, PSW_USB_V), + "PSW_V"); + tcg_ctx->cpu_PSW_SV = tcg_global_mem_new(uc->tcg_ctx, tcg_ctx->cpu_env, + offsetof(CPUTriCoreState, PSW_USB_SV), + "PSW_SV"); + tcg_ctx->cpu_PSW_AV = tcg_global_mem_new(uc->tcg_ctx, tcg_ctx->cpu_env, + offsetof(CPUTriCoreState, PSW_USB_AV), + "PSW_AV"); + tcg_ctx->cpu_PSW_SAV = tcg_global_mem_new(uc->tcg_ctx, tcg_ctx->cpu_env, + offsetof(CPUTriCoreState, PSW_USB_SAV), + "PSW_SAV"); +} \ No newline at end of file diff --git a/qemu/target/tricore/tricore-defs.h b/qemu/target/tricore/tricore-defs.h new file mode 100644 index 00000000..f5e0a0be --- /dev/null +++ b/qemu/target/tricore/tricore-defs.h @@ -0,0 +1,23 @@ +/* + * Copyright (c) 2012-2014 Bastian Koppelmann C-Lab/University Paderborn + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . 
+ */ + +#ifndef QEMU_TRICORE_DEFS_H +#define QEMU_TRICORE_DEFS_H + +#define TRICORE_TLB_MAX 128 + +#endif /* QEMU_TRICORE_DEFS_H */ diff --git a/qemu/target/tricore/tricore-opcodes.h b/qemu/target/tricore/tricore-opcodes.h new file mode 100644 index 00000000..f7135f18 --- /dev/null +++ b/qemu/target/tricore/tricore-opcodes.h @@ -0,0 +1,1474 @@ +/* + * Copyright (c) 2012-2014 Bastian Koppelmann C-Lab/University Paderborn + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ + +#ifndef TARGET_TRICORE_TRICORE_OPCODES_H +#define TARGET_TRICORE_TRICORE_OPCODES_H + +/* + * Opcode Masks for Tricore + * Format MASK_OP_InstrFormatName_Field + */ + +/* This creates a mask with bits start .. 
end set to 1 and applies it to op */ +#define MASK_BITS_SHIFT(op, start, end) (extract32(op, (start), \ + (end) - (start) + 1)) +#define MASK_BITS_SHIFT_SEXT(op, start, end) (sextract32(op, (start),\ + (end) - (start) + 1)) + +/* new opcode masks */ + +#define MASK_OP_MAJOR(op) MASK_BITS_SHIFT(op, 0, 7) + +/* 16-Bit Formats */ +#define MASK_OP_SB_DISP8(op) MASK_BITS_SHIFT(op, 8, 15) +#define MASK_OP_SB_DISP8_SEXT(op) MASK_BITS_SHIFT_SEXT(op, 8, 15) + +#define MASK_OP_SBC_CONST4(op) MASK_BITS_SHIFT(op, 12, 15) +#define MASK_OP_SBC_CONST4_SEXT(op) MASK_BITS_SHIFT_SEXT(op, 12, 15) +#define MASK_OP_SBC_DISP4(op) MASK_BITS_SHIFT(op, 8, 11) + +#define MASK_OP_SBR_S2(op) MASK_BITS_SHIFT(op, 12, 15) +#define MASK_OP_SBR_DISP4(op) MASK_BITS_SHIFT(op, 8, 11) + +#define MASK_OP_SBRN_N(op) MASK_BITS_SHIFT(op, 12, 15) +#define MASK_OP_SBRN_DISP4(op) MASK_BITS_SHIFT(op, 8, 11) + +#define MASK_OP_SC_CONST8(op) MASK_BITS_SHIFT(op, 8, 15) + +#define MASK_OP_SLR_S2(op) MASK_BITS_SHIFT(op, 12, 15) +#define MASK_OP_SLR_D(op) MASK_BITS_SHIFT(op, 8, 11) + +#define MASK_OP_SLRO_OFF4(op) MASK_BITS_SHIFT(op, 12, 15) +#define MASK_OP_SLRO_D(op) MASK_BITS_SHIFT(op, 8, 11) + +#define MASK_OP_SR_OP2(op) MASK_BITS_SHIFT(op, 12, 15) +#define MASK_OP_SR_S1D(op) MASK_BITS_SHIFT(op, 8, 11) + +#define MASK_OP_SRC_CONST4(op) MASK_BITS_SHIFT(op, 12, 15) +#define MASK_OP_SRC_CONST4_SEXT(op) MASK_BITS_SHIFT_SEXT(op, 12, 15) +#define MASK_OP_SRC_S1D(op) MASK_BITS_SHIFT(op, 8, 11) + +#define MASK_OP_SRO_S2(op) MASK_BITS_SHIFT(op, 12, 15) +#define MASK_OP_SRO_OFF4(op) MASK_BITS_SHIFT(op, 8, 11) + +#define MASK_OP_SRR_S2(op) MASK_BITS_SHIFT(op, 12, 15) +#define MASK_OP_SRR_S1D(op) MASK_BITS_SHIFT(op, 8, 11) + +#define MASK_OP_SRRS_S2(op) MASK_BITS_SHIFT(op, 12, 15) +#define MASK_OP_SRRS_S1D(op) MASK_BITS_SHIFT(op, 8, 11) +#define MASK_OP_SRRS_N(op) MASK_BITS_SHIFT(op, 6, 7) + +#define MASK_OP_SSR_S2(op) MASK_BITS_SHIFT(op, 12, 15) +#define MASK_OP_SSR_S1(op) MASK_BITS_SHIFT(op, 8, 11) + +#define 
MASK_OP_SSRO_OFF4(op) MASK_BITS_SHIFT(op, 12, 15) +#define MASK_OP_SSRO_S1(op) MASK_BITS_SHIFT(op, 8, 11) + +/* 32-Bit Formats */ + +/* ABS Format */ +#define MASK_OP_ABS_OFF18(op) (MASK_BITS_SHIFT(op, 16, 21) + \ + (MASK_BITS_SHIFT(op, 28, 31) << 6) + \ + (MASK_BITS_SHIFT(op, 22, 25) << 10) +\ + (MASK_BITS_SHIFT(op, 12, 15) << 14)) +#define MASK_OP_ABS_OP2(op) MASK_BITS_SHIFT(op, 26, 27) +#define MASK_OP_ABS_S1D(op) MASK_BITS_SHIFT(op, 8, 11) + +/* ABSB Format */ +#define MASK_OP_ABSB_OFF18(op) MASK_OP_ABS_OFF18(op) +#define MASK_OP_ABSB_OP2(op) MASK_BITS_SHIFT(op, 26, 27) +#define MASK_OP_ABSB_B(op) MASK_BITS_SHIFT(op, 11, 11) +#define MASK_OP_ABSB_BPOS(op) MASK_BITS_SHIFT(op, 8, 10) + +/* B Format */ +#define MASK_OP_B_DISP24(op) (MASK_BITS_SHIFT(op, 16, 31) + \ + (MASK_BITS_SHIFT(op, 8, 15) << 16)) +#define MASK_OP_B_DISP24_SEXT(op) (MASK_BITS_SHIFT(op, 16, 31) + \ + (MASK_BITS_SHIFT_SEXT(op, 8, 15) << 16)) +/* BIT Format */ +#define MASK_OP_BIT_D(op) MASK_BITS_SHIFT(op, 28, 31) +#define MASK_OP_BIT_POS2(op) MASK_BITS_SHIFT(op, 23, 27) +#define MASK_OP_BIT_OP2(op) MASK_BITS_SHIFT(op, 21, 22) +#define MASK_OP_BIT_POS1(op) MASK_BITS_SHIFT(op, 16, 20) +#define MASK_OP_BIT_S2(op) MASK_BITS_SHIFT(op, 12, 15) +#define MASK_OP_BIT_S1(op) MASK_BITS_SHIFT(op, 8, 11) + +/* BO Format */ +#define MASK_OP_BO_OFF10(op) (MASK_BITS_SHIFT(op, 16, 21) + \ + (MASK_BITS_SHIFT(op, 28, 31) << 6)) +#define MASK_OP_BO_OFF10_SEXT(op) (MASK_BITS_SHIFT(op, 16, 21) + \ + (MASK_BITS_SHIFT_SEXT(op, 28, 31) << 6)) +#define MASK_OP_BO_OP2(op) MASK_BITS_SHIFT(op, 22, 27) +#define MASK_OP_BO_S2(op) MASK_BITS_SHIFT(op, 12, 15) +#define MASK_OP_BO_S1D(op) MASK_BITS_SHIFT(op, 8, 11) + +/* BOL Format */ +#define MASK_OP_BOL_OFF16(op) ((MASK_BITS_SHIFT(op, 16, 21) + \ + (MASK_BITS_SHIFT(op, 28, 31) << 6)) + \ + (MASK_BITS_SHIFT(op, 22, 27) << 10)) +#define MASK_OP_BOL_OFF16_SEXT(op) ((MASK_BITS_SHIFT(op, 16, 21) + \ + (MASK_BITS_SHIFT(op, 28, 31) << 6)) + \ + (MASK_BITS_SHIFT_SEXT(op, 22, 27) << 
10)) +#define MASK_OP_BOL_S2(op) MASK_BITS_SHIFT(op, 12, 15) +#define MASK_OP_BOL_S1D(op) MASK_BITS_SHIFT(op, 8, 11) + +/* BRC Format */ +#define MASK_OP_BRC_OP2(op) MASK_BITS_SHIFT(op, 31, 31) +#define MASK_OP_BRC_DISP15(op) MASK_BITS_SHIFT(op, 16, 30) +#define MASK_OP_BRC_DISP15_SEXT(op) MASK_BITS_SHIFT_SEXT(op, 16, 30) +#define MASK_OP_BRC_CONST4(op) MASK_BITS_SHIFT(op, 12, 15) +#define MASK_OP_BRC_CONST4_SEXT(op) MASK_BITS_SHIFT_SEXT(op, 12, 15) +#define MASK_OP_BRC_S1(op) MASK_BITS_SHIFT(op, 8, 11) + +/* BRN Format */ +#define MASK_OP_BRN_OP2(op) MASK_BITS_SHIFT(op, 31, 31) +#define MASK_OP_BRN_DISP15(op) MASK_BITS_SHIFT(op, 16, 30) +#define MASK_OP_BRN_DISP15_SEXT(op) MASK_BITS_SHIFT_SEXT(op, 16, 30) +#define MASK_OP_BRN_N(op) (MASK_BITS_SHIFT(op, 12, 15) + \ + (MASK_BITS_SHIFT(op, 7, 7) << 4)) +#define MASK_OP_BRN_S1(op) MASK_BITS_SHIFT(op, 8, 11) +/* BRR Format */ +#define MASK_OP_BRR_OP2(op) MASK_BITS_SHIFT(op, 31, 31) +#define MASK_OP_BRR_DISP15(op) MASK_BITS_SHIFT(op, 16, 30) +#define MASK_OP_BRR_DISP15_SEXT(op) MASK_BITS_SHIFT_SEXT(op, 16, 30) +#define MASK_OP_BRR_S2(op) MASK_BITS_SHIFT(op, 12, 15) +#define MASK_OP_BRR_S1(op) MASK_BITS_SHIFT(op, 8, 11) + +/* META MASK for similar instr Formats */ +#define MASK_OP_META_D(op) MASK_BITS_SHIFT(op, 28, 31) +#define MASK_OP_META_S1(op) MASK_BITS_SHIFT(op, 8, 11) + +/* RC Format */ +#define MASK_OP_RC_D(op) MASK_OP_META_D(op) +#define MASK_OP_RC_OP2(op) MASK_BITS_SHIFT(op, 21, 27) +#define MASK_OP_RC_CONST9(op) MASK_BITS_SHIFT(op, 12, 20) +#define MASK_OP_RC_CONST9_SEXT(op) MASK_BITS_SHIFT_SEXT(op, 12, 20) +#define MASK_OP_RC_S1(op) MASK_OP_META_S1(op) + +/* RCPW Format */ + +#define MASK_OP_RCPW_D(op) MASK_OP_META_D(op) +#define MASK_OP_RCPW_POS(op) MASK_BITS_SHIFT(op, 23, 27) +#define MASK_OP_RCPW_OP2(op) MASK_BITS_SHIFT(op, 21, 22) +#define MASK_OP_RCPW_WIDTH(op) MASK_BITS_SHIFT(op, 16, 20) +#define MASK_OP_RCPW_CONST4(op) MASK_BITS_SHIFT(op, 12, 15) +#define MASK_OP_RCPW_S1(op) MASK_OP_META_S1(op) + +/* 
RCR Format */ + +#define MASK_OP_RCR_D(op) MASK_OP_META_D(op) +#define MASK_OP_RCR_S3(op) MASK_BITS_SHIFT(op, 24, 27) +#define MASK_OP_RCR_OP2(op) MASK_BITS_SHIFT(op, 21, 23) +#define MASK_OP_RCR_CONST9(op) MASK_BITS_SHIFT(op, 12, 20) +#define MASK_OP_RCR_CONST9_SEXT(op) MASK_BITS_SHIFT_SEXT(op, 12, 20) +#define MASK_OP_RCR_S1(op) MASK_OP_META_S1(op) + +/* RCRR Format */ + +#define MASK_OP_RCRR_D(op) MASK_OP_META_D(op) +#define MASK_OP_RCRR_S3(op) MASK_BITS_SHIFT(op, 24, 27) +#define MASK_OP_RCRR_OP2(op) MASK_BITS_SHIFT(op, 21, 23) +#define MASK_OP_RCRR_CONST4(op) MASK_BITS_SHIFT(op, 12, 15) +#define MASK_OP_RCRR_S1(op) MASK_OP_META_S1(op) + +/* RCRW Format */ + +#define MASK_OP_RCRW_D(op) MASK_OP_META_D(op) +#define MASK_OP_RCRW_S3(op) MASK_BITS_SHIFT(op, 24, 27) +#define MASK_OP_RCRW_OP2(op) MASK_BITS_SHIFT(op, 21, 23) +#define MASK_OP_RCRW_WIDTH(op) MASK_BITS_SHIFT(op, 16, 20) +#define MASK_OP_RCRW_CONST4(op) MASK_BITS_SHIFT(op, 12, 15) +#define MASK_OP_RCRW_S1(op) MASK_OP_META_S1(op) + +/* RLC Format */ + +#define MASK_OP_RLC_D(op) MASK_OP_META_D(op) +#define MASK_OP_RLC_CONST16(op) MASK_BITS_SHIFT(op, 12, 27) +#define MASK_OP_RLC_CONST16_SEXT(op) MASK_BITS_SHIFT_SEXT(op, 12, 27) +#define MASK_OP_RLC_S1(op) MASK_OP_META_S1(op) + +/* RR Format */ +#define MASK_OP_RR_D(op) MASK_OP_META_D(op) +#define MASK_OP_RR_OP2(op) MASK_BITS_SHIFT(op, 20, 27) +#define MASK_OP_RR_N(op) MASK_BITS_SHIFT(op, 16, 17) +#define MASK_OP_RR_S2(op) MASK_BITS_SHIFT(op, 12, 15) +#define MASK_OP_RR_S1(op) MASK_OP_META_S1(op) + +/* RR1 Format */ +#define MASK_OP_RR1_D(op) MASK_OP_META_D(op) +#define MASK_OP_RR1_OP2(op) MASK_BITS_SHIFT(op, 18, 27) +#define MASK_OP_RR1_N(op) MASK_BITS_SHIFT(op, 16, 17) +#define MASK_OP_RR1_S2(op) MASK_BITS_SHIFT(op, 12, 15) +#define MASK_OP_RR1_S1(op) MASK_OP_META_S1(op) + +/* RR2 Format */ +#define MASK_OP_RR2_D(op) MASK_OP_META_D(op) +#define MASK_OP_RR2_OP2(op) MASK_BITS_SHIFT(op, 16, 27) +#define MASK_OP_RR2_S2(op) MASK_BITS_SHIFT(op, 12, 15) +#define 
MASK_OP_RR2_S1(op) MASK_OP_META_S1(op) + +/* RRPW Format */ +#define MASK_OP_RRPW_D(op) MASK_OP_META_D(op) +#define MASK_OP_RRPW_POS(op) MASK_BITS_SHIFT(op, 23, 27) +#define MASK_OP_RRPW_OP2(op) MASK_BITS_SHIFT(op, 21, 22) +#define MASK_OP_RRPW_WIDTH(op) MASK_BITS_SHIFT(op, 16, 20) +#define MASK_OP_RRPW_S2(op) MASK_BITS_SHIFT(op, 12, 15) +#define MASK_OP_RRPW_S1(op) MASK_OP_META_S1(op) + +/* RRR Format */ +#define MASK_OP_RRR_D(op) MASK_OP_META_D(op) +#define MASK_OP_RRR_S3(op) MASK_BITS_SHIFT(op, 24, 27) +#define MASK_OP_RRR_OP2(op) MASK_BITS_SHIFT(op, 20, 23) +#define MASK_OP_RRR_N(op) MASK_BITS_SHIFT(op, 16, 17) +#define MASK_OP_RRR_S2(op) MASK_BITS_SHIFT(op, 12, 15) +#define MASK_OP_RRR_S1(op) MASK_OP_META_S1(op) + +/* RRR1 Format */ +#define MASK_OP_RRR1_D(op) MASK_OP_META_D(op) +#define MASK_OP_RRR1_S3(op) MASK_BITS_SHIFT(op, 24, 27) +#define MASK_OP_RRR1_OP2(op) MASK_BITS_SHIFT(op, 18, 23) +#define MASK_OP_RRR1_N(op) MASK_BITS_SHIFT(op, 16, 17) +#define MASK_OP_RRR1_S2(op) MASK_BITS_SHIFT(op, 12, 15) +#define MASK_OP_RRR1_S1(op) MASK_OP_META_S1(op) + +/* RRR2 Format */ +#define MASK_OP_RRR2_D(op) MASK_OP_META_D(op) +#define MASK_OP_RRR2_S3(op) MASK_BITS_SHIFT(op, 24, 27) +#define MASK_OP_RRR2_OP2(op) MASK_BITS_SHIFT(op, 16, 23) +#define MASK_OP_RRR2_S2(op) MASK_BITS_SHIFT(op, 12, 15) +#define MASK_OP_RRR2_S1(op) MASK_OP_META_S1(op) + +/* RRRR Format */ +#define MASK_OP_RRRR_D(op) MASK_OP_META_D(op) +#define MASK_OP_RRRR_S3(op) MASK_BITS_SHIFT(op, 24, 27) +#define MASK_OP_RRRR_OP2(op) MASK_BITS_SHIFT(op, 21, 23) +#define MASK_OP_RRRR_S2(op) MASK_BITS_SHIFT(op, 12, 15) +#define MASK_OP_RRRR_S1(op) MASK_OP_META_S1(op) + +/* RRRW Format */ +#define MASK_OP_RRRW_D(op) MASK_OP_META_D(op) +#define MASK_OP_RRRW_S3(op) MASK_BITS_SHIFT(op, 24, 27) +#define MASK_OP_RRRW_OP2(op) MASK_BITS_SHIFT(op, 21, 23) +#define MASK_OP_RRRW_WIDTH(op) MASK_BITS_SHIFT(op, 16, 20) +#define MASK_OP_RRRW_S2(op) MASK_BITS_SHIFT(op, 12, 15) +#define MASK_OP_RRRW_S1(op) MASK_OP_META_S1(op) 
+ +/* SYS Format */ +#define MASK_OP_SYS_OP2(op) MASK_BITS_SHIFT(op, 22, 27) +#define MASK_OP_SYS_S1D(op) MASK_OP_META_S1(op) + + + +/* + * Tricore Opcodes Enums + * + * Format: OPC(1|2|M)_InstrLen_Name + * OPC1 = only op1 field is used + * OPC2 = op1 and op2 field used part of OPCM + * OPCM = op1 field used to group Instr + * InstrLen = 16|32 + * Name = Name of Instr + */ + +/* 16-Bit */ +enum { + + OPCM_16_SR_SYSTEM = 0x00, + OPCM_16_SR_ACCU = 0x32, + + OPC1_16_SRC_ADD = 0xc2, + OPC1_16_SRC_ADD_A15 = 0x92, + OPC1_16_SRC_ADD_15A = 0x9a, + OPC1_16_SRR_ADD = 0x42, + OPC1_16_SRR_ADD_A15 = 0x12, + OPC1_16_SRR_ADD_15A = 0x1a, + OPC1_16_SRC_ADD_A = 0xb0, + OPC1_16_SRR_ADD_A = 0x30, + OPC1_16_SRR_ADDS = 0x22, + OPC1_16_SRRS_ADDSC_A = 0x10, + OPC1_16_SC_AND = 0x16, + OPC1_16_SRR_AND = 0x26, + OPC1_16_SC_BISR = 0xe0, + OPC1_16_SRC_CADD = 0x8a, + OPC1_16_SRC_CADDN = 0xca, + OPC1_16_SB_CALL = 0x5c, + OPC1_16_SRC_CMOV = 0xaa, + OPC1_16_SRR_CMOV = 0x2a, + OPC1_16_SRC_CMOVN = 0xea, + OPC1_16_SRR_CMOVN = 0x6a, + OPC1_16_SRC_EQ = 0xba, + OPC1_16_SRR_EQ = 0x3a, + OPC1_16_SB_J = 0x3c, + OPC1_16_SBC_JEQ = 0x1e, + OPC1_16_SBC_JEQ2 = 0x9e, + OPC1_16_SBR_JEQ = 0x3e, + OPC1_16_SBR_JEQ2 = 0xbe, + OPC1_16_SBR_JGEZ = 0xce, + OPC1_16_SBR_JGTZ = 0x4e, + OPC1_16_SR_JI = 0xdc, + OPC1_16_SBR_JLEZ = 0x8e, + OPC1_16_SBR_JLTZ = 0x0e, + OPC1_16_SBC_JNE = 0x5e, + OPC1_16_SBC_JNE2 = 0xde, + OPC1_16_SBR_JNE = 0x7e, + OPC1_16_SBR_JNE2 = 0xfe, + OPC1_16_SB_JNZ = 0xee, + OPC1_16_SBR_JNZ = 0xf6, + OPC1_16_SBR_JNZ_A = 0x7c, + OPC1_16_SBRN_JNZ_T = 0xae, + OPC1_16_SB_JZ = 0x6e, + OPC1_16_SBR_JZ = 0x76, + OPC1_16_SBR_JZ_A = 0xbc, + OPC1_16_SBRN_JZ_T = 0x2e, + OPC1_16_SC_LD_A = 0xd8, + OPC1_16_SLR_LD_A = 0xd4, + OPC1_16_SLR_LD_A_POSTINC = 0xc4, + OPC1_16_SLRO_LD_A = 0xc8, + OPC1_16_SRO_LD_A = 0xcc, + OPC1_16_SLR_LD_BU = 0x14, + OPC1_16_SLR_LD_BU_POSTINC = 0x04, + OPC1_16_SLRO_LD_BU = 0x08, + OPC1_16_SRO_LD_BU = 0x0c, + OPC1_16_SLR_LD_H = 0x94, + OPC1_16_SLR_LD_H_POSTINC = 0x84, + OPC1_16_SLRO_LD_H = 0x88, + 
OPC1_16_SRO_LD_H = 0x8c, + OPC1_16_SC_LD_W = 0x58, + OPC1_16_SLR_LD_W = 0x54, + OPC1_16_SLR_LD_W_POSTINC = 0x44, + OPC1_16_SLRO_LD_W = 0x48, + OPC1_16_SRO_LD_W = 0x4c, + OPC1_16_SBR_LOOP = 0xfc, + OPC1_16_SRC_LT = 0xfa, + OPC1_16_SRR_LT = 0x7a, + OPC1_16_SC_MOV = 0xda, + OPC1_16_SRC_MOV = 0x82, + OPC1_16_SRR_MOV = 0x02, + OPC1_16_SRC_MOV_E = 0xd2,/* 1.6 only */ + OPC1_16_SRC_MOV_A = 0xa0, + OPC1_16_SRR_MOV_A = 0x60, + OPC1_16_SRR_MOV_AA = 0x40, + OPC1_16_SRR_MOV_D = 0x80, + OPC1_16_SRR_MUL = 0xe2, + OPC1_16_SR_NOT = 0x46, + OPC1_16_SC_OR = 0x96, + OPC1_16_SRR_OR = 0xa6, + OPC1_16_SRC_SH = 0x06, + OPC1_16_SRC_SHA = 0x86, + OPC1_16_SC_ST_A = 0xf8, + OPC1_16_SRO_ST_A = 0xec, + OPC1_16_SSR_ST_A = 0xf4, + OPC1_16_SSR_ST_A_POSTINC = 0xe4, + OPC1_16_SSRO_ST_A = 0xe8, + OPC1_16_SRO_ST_B = 0x2c, + OPC1_16_SSR_ST_B = 0x34, + OPC1_16_SSR_ST_B_POSTINC = 0x24, + OPC1_16_SSRO_ST_B = 0x28, + OPC1_16_SRO_ST_H = 0xac, + OPC1_16_SSR_ST_H = 0xb4, + OPC1_16_SSR_ST_H_POSTINC = 0xa4, + OPC1_16_SSRO_ST_H = 0xa8, + OPC1_16_SC_ST_W = 0x78, + OPC1_16_SRO_ST_W = 0x6c, + OPC1_16_SSR_ST_W = 0x74, + OPC1_16_SSR_ST_W_POSTINC = 0x64, + OPC1_16_SSRO_ST_W = 0x68, + OPC1_16_SRR_SUB = 0xa2, + OPC1_16_SRR_SUB_A15B = 0x52, + OPC1_16_SRR_SUB_15AB = 0x5a, + OPC1_16_SC_SUB_A = 0x20, + OPC1_16_SRR_SUBS = 0x62, + OPC1_16_SRR_XOR = 0xc6, + +}; + +/* + * SR Format + */ +/* OPCM_16_SR_SYSTEM */ +enum { + + OPC2_16_SR_NOP = 0x00, + OPC2_16_SR_RET = 0x09, + OPC2_16_SR_RFE = 0x08, + OPC2_16_SR_DEBUG = 0x0a, + OPC2_16_SR_FRET = 0x07, +}; +/* OPCM_16_SR_ACCU */ +enum { + OPC2_16_SR_RSUB = 0x05, + OPC2_16_SR_SAT_B = 0x00, + OPC2_16_SR_SAT_BU = 0x01, + OPC2_16_SR_SAT_H = 0x02, + OPC2_16_SR_SAT_HU = 0x03, + +}; + +/* 32-Bit */ + +enum { +/* ABS Format 1, M */ + OPCM_32_ABS_LDW = 0x85, + OPCM_32_ABS_LDB = 0x05, + OPCM_32_ABS_LDMST_SWAP = 0xe5, + OPCM_32_ABS_LDST_CONTEXT = 0x15, + OPCM_32_ABS_STORE = 0xa5, + OPCM_32_ABS_STOREB_H = 0x25, + OPC1_32_ABS_STOREQ = 0x65, + OPC1_32_ABS_LD_Q = 0x45, + OPC1_32_ABS_LEA = 0xc5, 
+/* ABSB Format */ + OPC1_32_ABSB_ST_T = 0xd5, +/* B Format */ + OPC1_32_B_CALL = 0x6d, + OPC1_32_B_CALLA = 0xed, + OPC1_32_B_FCALL = 0x61, + OPC1_32_B_FCALLA = 0xe1, + OPC1_32_B_J = 0x1d, + OPC1_32_B_JA = 0x9d, + OPC1_32_B_JL = 0x5d, + OPC1_32_B_JLA = 0xdd, +/* Bit Format */ + OPCM_32_BIT_ANDACC = 0x47, + OPCM_32_BIT_LOGICAL_T1 = 0x87, + OPCM_32_BIT_INSERT = 0x67, + OPCM_32_BIT_LOGICAL_T2 = 0x07, + OPCM_32_BIT_ORAND = 0xc7, + OPCM_32_BIT_SH_LOGIC1 = 0x27, + OPCM_32_BIT_SH_LOGIC2 = 0xa7, +/* BO Format */ + OPCM_32_BO_ADDRMODE_POST_PRE_BASE = 0x89, + OPCM_32_BO_ADDRMODE_BITREVERSE_CIRCULAR = 0xa9, + OPCM_32_BO_ADDRMODE_LD_POST_PRE_BASE = 0x09, + OPCM_32_BO_ADDRMODE_LD_BITREVERSE_CIRCULAR = 0x29, + OPCM_32_BO_ADDRMODE_STCTX_POST_PRE_BASE = 0x49, + OPCM_32_BO_ADDRMODE_LDMST_BITREVERSE_CIRCULAR = 0x69, +/* BOL Format */ + OPC1_32_BOL_LD_A_LONGOFF = 0x99, + OPC1_32_BOL_LD_W_LONGOFF = 0x19, + OPC1_32_BOL_LEA_LONGOFF = 0xd9, + OPC1_32_BOL_ST_W_LONGOFF = 0x59, + OPC1_32_BOL_ST_A_LONGOFF = 0xb5, /* 1.6 only */ + OPC1_32_BOL_LD_B_LONGOFF = 0x79, /* 1.6 only */ + OPC1_32_BOL_LD_BU_LONGOFF = 0x39, /* 1.6 only */ + OPC1_32_BOL_LD_H_LONGOFF = 0xc9, /* 1.6 only */ + OPC1_32_BOL_LD_HU_LONGOFF = 0xb9, /* 1.6 only */ + OPC1_32_BOL_ST_B_LONGOFF = 0xe9, /* 1.6 only */ + OPC1_32_BOL_ST_H_LONGOFF = 0xf9, /* 1.6 only */ +/* BRC Format */ + OPCM_32_BRC_EQ_NEQ = 0xdf, + OPCM_32_BRC_GE = 0xff, + OPCM_32_BRC_JLT = 0xbf, + OPCM_32_BRC_JNE = 0x9f, +/* BRN Format */ + OPCM_32_BRN_JTT = 0x6f, +/* BRR Format */ + OPCM_32_BRR_EQ_NEQ = 0x5f, + OPCM_32_BRR_ADDR_EQ_NEQ = 0x7d, + OPCM_32_BRR_GE = 0x7f, + OPCM_32_BRR_JLT = 0x3f, + OPCM_32_BRR_JNE = 0x1f, + OPCM_32_BRR_JNZ = 0xbd, + OPCM_32_BRR_LOOP = 0xfd, +/* RC Format */ + OPCM_32_RC_LOGICAL_SHIFT = 0x8f, + OPCM_32_RC_ACCUMULATOR = 0x8b, + OPCM_32_RC_SERVICEROUTINE = 0xad, + OPCM_32_RC_MUL = 0x53, +/* RCPW Format */ + OPCM_32_RCPW_MASK_INSERT = 0xb7, +/* RCR Format */ + OPCM_32_RCR_COND_SELECT = 0xab, + OPCM_32_RCR_MADD = 0x13, + OPCM_32_RCR_MSUB = 
0x33, +/* RCRR Format */ + OPC1_32_RCRR_INSERT = 0x97, +/* RCRW Format */ + OPCM_32_RCRW_MASK_INSERT = 0xd7, +/* RLC Format */ + OPC1_32_RLC_ADDI = 0x1b, + OPC1_32_RLC_ADDIH = 0x9b, + OPC1_32_RLC_ADDIH_A = 0x11, + OPC1_32_RLC_MFCR = 0x4d, + OPC1_32_RLC_MOV = 0x3b, + OPC1_32_RLC_MOV_64 = 0xfb, /* 1.6 only */ + OPC1_32_RLC_MOV_U = 0xbb, + OPC1_32_RLC_MOV_H = 0x7b, + OPC1_32_RLC_MOVH_A = 0x91, + OPC1_32_RLC_MTCR = 0xcd, +/* RR Format */ + OPCM_32_RR_LOGICAL_SHIFT = 0x0f, + OPCM_32_RR_ACCUMULATOR = 0x0b, + OPCM_32_RR_ADDRESS = 0x01, + OPCM_32_RR_DIVIDE = 0x4b, + OPCM_32_RR_IDIRECT = 0x2d, +/* RR1 Format */ + OPCM_32_RR1_MUL = 0xb3, + OPCM_32_RR1_MULQ = 0x93, +/* RR2 Format */ + OPCM_32_RR2_MUL = 0x73, +/* RRPW Format */ + OPCM_32_RRPW_EXTRACT_INSERT = 0x37, + OPC1_32_RRPW_DEXTR = 0x77, +/* RRR Format */ + OPCM_32_RRR_COND_SELECT = 0x2b, + OPCM_32_RRR_DIVIDE = 0x6b, +/* RRR1 Format */ + OPCM_32_RRR1_MADD = 0x83, + OPCM_32_RRR1_MADDQ_H = 0x43, + OPCM_32_RRR1_MADDSU_H = 0xc3, + OPCM_32_RRR1_MSUB_H = 0xa3, + OPCM_32_RRR1_MSUB_Q = 0x63, + OPCM_32_RRR1_MSUBAD_H = 0xe3, +/* RRR2 Format */ + OPCM_32_RRR2_MADD = 0x03, + OPCM_32_RRR2_MSUB = 0x23, +/* RRRR Format */ + OPCM_32_RRRR_EXTRACT_INSERT = 0x17, +/* RRRW Format */ + OPCM_32_RRRW_EXTRACT_INSERT = 0x57, +/* SYS Format */ + OPCM_32_SYS_INTERRUPTS = 0x0d, + OPC1_32_SYS_RSTV = 0x2f, +}; + + + +/* + * ABS Format + */ + +/* OPCM_32_ABS_LDW */ +enum { + + OPC2_32_ABS_LD_A = 0x02, + OPC2_32_ABS_LD_D = 0x01, + OPC2_32_ABS_LD_DA = 0x03, + OPC2_32_ABS_LD_W = 0x00, +}; + +/* OPCM_32_ABS_LDB */ +enum { + OPC2_32_ABS_LD_B = 0x00, + OPC2_32_ABS_LD_BU = 0x01, + OPC2_32_ABS_LD_H = 0x02, + OPC2_32_ABS_LD_HU = 0x03, +}; +/* OPCM_32_ABS_LDMST_SWAP */ +enum { + OPC2_32_ABS_LDMST = 0x01, + OPC2_32_ABS_SWAP_W = 0x00, +}; +/* OPCM_32_ABS_LDST_CONTEXT */ +enum { + OPC2_32_ABS_LDLCX = 0x02, + OPC2_32_ABS_LDUCX = 0x03, + OPC2_32_ABS_STLCX = 0x00, + OPC2_32_ABS_STUCX = 0x01, +}; +/* OPCM_32_ABS_STORE */ +enum { + OPC2_32_ABS_ST_A = 0x02, + 
OPC2_32_ABS_ST_D = 0x01, + OPC2_32_ABS_ST_DA = 0x03, + OPC2_32_ABS_ST_W = 0x00, +}; +/* OPCM_32_ABS_STOREB_H */ +enum { + OPC2_32_ABS_ST_B = 0x00, + OPC2_32_ABS_ST_H = 0x02, +}; +/* + * Bit Format + */ +/* OPCM_32_BIT_ANDACC */ +enum { + OPC2_32_BIT_AND_AND_T = 0x00, + OPC2_32_BIT_AND_ANDN_T = 0x03, + OPC2_32_BIT_AND_NOR_T = 0x02, + OPC2_32_BIT_AND_OR_T = 0x01, +}; +/* OPCM_32_BIT_LOGICAL_T */ +enum { + OPC2_32_BIT_AND_T = 0x00, + OPC2_32_BIT_ANDN_T = 0x03, + OPC2_32_BIT_NOR_T = 0x02, + OPC2_32_BIT_OR_T = 0x01, +}; +/* OPCM_32_BIT_INSERT */ +enum { + OPC2_32_BIT_INS_T = 0x00, + OPC2_32_BIT_INSN_T = 0x01, +}; +/* OPCM_32_BIT_LOGICAL_T2 */ +enum { + OPC2_32_BIT_NAND_T = 0x00, + OPC2_32_BIT_ORN_T = 0x01, + OPC2_32_BIT_XNOR_T = 0x02, + OPC2_32_BIT_XOR_T = 0x03, +}; +/* OPCM_32_BIT_ORAND */ +enum { + OPC2_32_BIT_OR_AND_T = 0x00, + OPC2_32_BIT_OR_ANDN_T = 0x03, + OPC2_32_BIT_OR_NOR_T = 0x02, + OPC2_32_BIT_OR_OR_T = 0x01, +}; +/*OPCM_32_BIT_SH_LOGIC1 */ +enum { + OPC2_32_BIT_SH_AND_T = 0x00, + OPC2_32_BIT_SH_ANDN_T = 0x03, + OPC2_32_BIT_SH_NOR_T = 0x02, + OPC2_32_BIT_SH_OR_T = 0x01, +}; +/* OPCM_32_BIT_SH_LOGIC2 */ +enum { + OPC2_32_BIT_SH_NAND_T = 0x00, + OPC2_32_BIT_SH_ORN_T = 0x01, + OPC2_32_BIT_SH_XNOR_T = 0x02, + OPC2_32_BIT_SH_XOR_T = 0x03, +}; +/* + * BO Format + */ +/* OPCM_32_BO_ADDRMODE_POST_PRE_BASE */ +enum { + OPC2_32_BO_CACHEA_I_SHORTOFF = 0x2e, + OPC2_32_BO_CACHEA_I_POSTINC = 0x0e, + OPC2_32_BO_CACHEA_I_PREINC = 0x1e, + OPC2_32_BO_CACHEA_W_SHORTOFF = 0x2c, + OPC2_32_BO_CACHEA_W_POSTINC = 0x0c, + OPC2_32_BO_CACHEA_W_PREINC = 0x1c, + OPC2_32_BO_CACHEA_WI_SHORTOFF = 0x2d, + OPC2_32_BO_CACHEA_WI_POSTINC = 0x0d, + OPC2_32_BO_CACHEA_WI_PREINC = 0x1d, + /* 1.3.1 only */ + OPC2_32_BO_CACHEI_W_SHORTOFF = 0x2b, + OPC2_32_BO_CACHEI_W_POSTINC = 0x0b, + OPC2_32_BO_CACHEI_W_PREINC = 0x1b, + OPC2_32_BO_CACHEI_WI_SHORTOFF = 0x2f, + OPC2_32_BO_CACHEI_WI_POSTINC = 0x0f, + OPC2_32_BO_CACHEI_WI_PREINC = 0x1f, + /* end 1.3.1 only */ + OPC2_32_BO_ST_A_SHORTOFF = 0x26, + 
OPC2_32_BO_ST_A_POSTINC = 0x06, + OPC2_32_BO_ST_A_PREINC = 0x16, + OPC2_32_BO_ST_B_SHORTOFF = 0x20, + OPC2_32_BO_ST_B_POSTINC = 0x00, + OPC2_32_BO_ST_B_PREINC = 0x10, + OPC2_32_BO_ST_D_SHORTOFF = 0x25, + OPC2_32_BO_ST_D_POSTINC = 0x05, + OPC2_32_BO_ST_D_PREINC = 0x15, + OPC2_32_BO_ST_DA_SHORTOFF = 0x27, + OPC2_32_BO_ST_DA_POSTINC = 0x07, + OPC2_32_BO_ST_DA_PREINC = 0x17, + OPC2_32_BO_ST_H_SHORTOFF = 0x22, + OPC2_32_BO_ST_H_POSTINC = 0x02, + OPC2_32_BO_ST_H_PREINC = 0x12, + OPC2_32_BO_ST_Q_SHORTOFF = 0x28, + OPC2_32_BO_ST_Q_POSTINC = 0x08, + OPC2_32_BO_ST_Q_PREINC = 0x18, + OPC2_32_BO_ST_W_SHORTOFF = 0x24, + OPC2_32_BO_ST_W_POSTINC = 0x04, + OPC2_32_BO_ST_W_PREINC = 0x14, +}; +/* OPCM_32_BO_ADDRMODE_BITREVERSE_CIRCULAR */ +enum { + OPC2_32_BO_CACHEA_I_BR = 0x0e, + OPC2_32_BO_CACHEA_I_CIRC = 0x1e, + OPC2_32_BO_CACHEA_W_BR = 0x0c, + OPC2_32_BO_CACHEA_W_CIRC = 0x1c, + OPC2_32_BO_CACHEA_WI_BR = 0x0d, + OPC2_32_BO_CACHEA_WI_CIRC = 0x1d, + OPC2_32_BO_ST_A_BR = 0x06, + OPC2_32_BO_ST_A_CIRC = 0x16, + OPC2_32_BO_ST_B_BR = 0x00, + OPC2_32_BO_ST_B_CIRC = 0x10, + OPC2_32_BO_ST_D_BR = 0x05, + OPC2_32_BO_ST_D_CIRC = 0x15, + OPC2_32_BO_ST_DA_BR = 0x07, + OPC2_32_BO_ST_DA_CIRC = 0x17, + OPC2_32_BO_ST_H_BR = 0x02, + OPC2_32_BO_ST_H_CIRC = 0x12, + OPC2_32_BO_ST_Q_BR = 0x08, + OPC2_32_BO_ST_Q_CIRC = 0x18, + OPC2_32_BO_ST_W_BR = 0x04, + OPC2_32_BO_ST_W_CIRC = 0x14, +}; +/* OPCM_32_BO_ADDRMODE_LD_POST_PRE_BASE */ +enum { + OPC2_32_BO_LD_A_SHORTOFF = 0x26, + OPC2_32_BO_LD_A_POSTINC = 0x06, + OPC2_32_BO_LD_A_PREINC = 0x16, + OPC2_32_BO_LD_B_SHORTOFF = 0x20, + OPC2_32_BO_LD_B_POSTINC = 0x00, + OPC2_32_BO_LD_B_PREINC = 0x10, + OPC2_32_BO_LD_BU_SHORTOFF = 0x21, + OPC2_32_BO_LD_BU_POSTINC = 0x01, + OPC2_32_BO_LD_BU_PREINC = 0x11, + OPC2_32_BO_LD_D_SHORTOFF = 0x25, + OPC2_32_BO_LD_D_POSTINC = 0x05, + OPC2_32_BO_LD_D_PREINC = 0x15, + OPC2_32_BO_LD_DA_SHORTOFF = 0x27, + OPC2_32_BO_LD_DA_POSTINC = 0x07, + OPC2_32_BO_LD_DA_PREINC = 0x17, + OPC2_32_BO_LD_H_SHORTOFF = 0x22, + OPC2_32_BO_LD_H_POSTINC 
= 0x02, + OPC2_32_BO_LD_H_PREINC = 0x12, + OPC2_32_BO_LD_HU_SHORTOFF = 0x23, + OPC2_32_BO_LD_HU_POSTINC = 0x03, + OPC2_32_BO_LD_HU_PREINC = 0x13, + OPC2_32_BO_LD_Q_SHORTOFF = 0x28, + OPC2_32_BO_LD_Q_POSTINC = 0x08, + OPC2_32_BO_LD_Q_PREINC = 0x18, + OPC2_32_BO_LD_W_SHORTOFF = 0x24, + OPC2_32_BO_LD_W_POSTINC = 0x04, + OPC2_32_BO_LD_W_PREINC = 0x14, +}; +/* OPCM_32_BO_ADDRMODE_LD_BITREVERSE_CIRCULAR */ +enum { + OPC2_32_BO_LD_A_BR = 0x06, + OPC2_32_BO_LD_A_CIRC = 0x16, + OPC2_32_BO_LD_B_BR = 0x00, + OPC2_32_BO_LD_B_CIRC = 0x10, + OPC2_32_BO_LD_BU_BR = 0x01, + OPC2_32_BO_LD_BU_CIRC = 0x11, + OPC2_32_BO_LD_D_BR = 0x05, + OPC2_32_BO_LD_D_CIRC = 0x15, + OPC2_32_BO_LD_DA_BR = 0x07, + OPC2_32_BO_LD_DA_CIRC = 0x17, + OPC2_32_BO_LD_H_BR = 0x02, + OPC2_32_BO_LD_H_CIRC = 0x12, + OPC2_32_BO_LD_HU_BR = 0x03, + OPC2_32_BO_LD_HU_CIRC = 0x13, + OPC2_32_BO_LD_Q_BR = 0x08, + OPC2_32_BO_LD_Q_CIRC = 0x18, + OPC2_32_BO_LD_W_BR = 0x04, + OPC2_32_BO_LD_W_CIRC = 0x14, +}; +/* OPCM_32_BO_ADDRMODE_STCTX_POST_PRE_BASE */ +enum { + OPC2_32_BO_LDLCX_SHORTOFF = 0x24, + OPC2_32_BO_LDMST_SHORTOFF = 0x21, + OPC2_32_BO_LDMST_POSTINC = 0x01, + OPC2_32_BO_LDMST_PREINC = 0x11, + OPC2_32_BO_LDUCX_SHORTOFF = 0x25, + OPC2_32_BO_LEA_SHORTOFF = 0x28, + OPC2_32_BO_STLCX_SHORTOFF = 0x26, + OPC2_32_BO_STUCX_SHORTOFF = 0x27, + OPC2_32_BO_SWAP_W_SHORTOFF = 0x20, + OPC2_32_BO_SWAP_W_POSTINC = 0x00, + OPC2_32_BO_SWAP_W_PREINC = 0x10, + OPC2_32_BO_CMPSWAP_W_SHORTOFF = 0x23, + OPC2_32_BO_CMPSWAP_W_POSTINC = 0x03, + OPC2_32_BO_CMPSWAP_W_PREINC = 0x13, + OPC2_32_BO_SWAPMSK_W_SHORTOFF = 0x22, + OPC2_32_BO_SWAPMSK_W_POSTINC = 0x02, + OPC2_32_BO_SWAPMSK_W_PREINC = 0x12, +}; +/*OPCM_32_BO_ADDRMODE_LDMST_BITREVERSE_CIRCULAR */ +enum { + OPC2_32_BO_LDMST_BR = 0x01, + OPC2_32_BO_LDMST_CIRC = 0x11, + OPC2_32_BO_SWAP_W_BR = 0x00, + OPC2_32_BO_SWAP_W_CIRC = 0x10, + OPC2_32_BO_CMPSWAP_W_BR = 0x03, + OPC2_32_BO_CMPSWAP_W_CIRC = 0x13, + OPC2_32_BO_SWAPMSK_W_BR = 0x02, + OPC2_32_BO_SWAPMSK_W_CIRC = 0x12, +}; +/* + * BRC Format + */ 
+/*OPCM_32_BRC_EQ_NEQ */ +enum { + OPC2_32_BRC_JEQ = 0x00, + OPC2_32_BRC_JNE = 0x01, +}; +/* OPCM_32_BRC_GE */ +enum { + OP2_32_BRC_JGE = 0x00, + OPC_32_BRC_JGE_U = 0x01, +}; +/* OPCM_32_BRC_JLT */ +enum { + OPC2_32_BRC_JLT = 0x00, + OPC2_32_BRC_JLT_U = 0x01, +}; +/* OPCM_32_BRC_JNE */ +enum { + OPC2_32_BRC_JNED = 0x01, + OPC2_32_BRC_JNEI = 0x00, +}; +/* + * BRN Format + */ +/* OPCM_32_BRN_JTT */ +enum { + OPC2_32_BRN_JNZ_T = 0x01, + OPC2_32_BRN_JZ_T = 0x00, +}; +/* + * BRR Format + */ +/* OPCM_32_BRR_EQ_NEQ */ +enum { + OPC2_32_BRR_JEQ = 0x00, + OPC2_32_BRR_JNE = 0x01, +}; +/* OPCM_32_BRR_ADDR_EQ_NEQ */ +enum { + OPC2_32_BRR_JEQ_A = 0x00, + OPC2_32_BRR_JNE_A = 0x01, +}; +/*OPCM_32_BRR_GE */ +enum { + OPC2_32_BRR_JGE = 0x00, + OPC2_32_BRR_JGE_U = 0x01, +}; +/* OPCM_32_BRR_JLT */ +enum { + OPC2_32_BRR_JLT = 0x00, + OPC2_32_BRR_JLT_U = 0x01, +}; +/* OPCM_32_BRR_JNE */ +enum { + OPC2_32_BRR_JNED = 0x01, + OPC2_32_BRR_JNEI = 0x00, +}; +/* OPCM_32_BRR_JNZ */ +enum { + OPC2_32_BRR_JNZ_A = 0x01, + OPC2_32_BRR_JZ_A = 0x00, +}; +/* OPCM_32_BRR_LOOP */ +enum { + OPC2_32_BRR_LOOP = 0x00, + OPC2_32_BRR_LOOPU = 0x01, +}; +/* + * RC Format + */ +/* OPCM_32_RC_LOGICAL_SHIFT */ +enum { + OPC2_32_RC_AND = 0x08, + OPC2_32_RC_ANDN = 0x0e, + OPC2_32_RC_NAND = 0x09, + OPC2_32_RC_NOR = 0x0b, + OPC2_32_RC_OR = 0x0a, + OPC2_32_RC_ORN = 0x0f, + OPC2_32_RC_SH = 0x00, + OPC2_32_RC_SH_H = 0x40, + OPC2_32_RC_SHA = 0x01, + OPC2_32_RC_SHA_H = 0x41, + OPC2_32_RC_SHAS = 0x02, + OPC2_32_RC_XNOR = 0x0d, + OPC2_32_RC_XOR = 0x0c, +}; +/* OPCM_32_RC_ACCUMULATOR */ +enum { + OPC2_32_RC_ABSDIF = 0x0e, + OPC2_32_RC_ABSDIFS = 0x0f, + OPC2_32_RC_ADD = 0x00, + OPC2_32_RC_ADDC = 0x05, + OPC2_32_RC_ADDS = 0x02, + OPC2_32_RC_ADDS_U = 0x03, + OPC2_32_RC_ADDX = 0x04, + OPC2_32_RC_AND_EQ = 0x20, + OPC2_32_RC_AND_GE = 0x24, + OPC2_32_RC_AND_GE_U = 0x25, + OPC2_32_RC_AND_LT = 0x22, + OPC2_32_RC_AND_LT_U = 0x23, + OPC2_32_RC_AND_NE = 0x21, + OPC2_32_RC_EQ = 0x10, + OPC2_32_RC_EQANY_B = 0x56, + OPC2_32_RC_EQANY_H = 
0x76, + OPC2_32_RC_GE = 0x14, + OPC2_32_RC_GE_U = 0x15, + OPC2_32_RC_LT = 0x12, + OPC2_32_RC_LT_U = 0x13, + OPC2_32_RC_MAX = 0x1a, + OPC2_32_RC_MAX_U = 0x1b, + OPC2_32_RC_MIN = 0x18, + OPC2_32_RC_MIN_U = 0x19, + OPC2_32_RC_NE = 0x11, + OPC2_32_RC_OR_EQ = 0x27, + OPC2_32_RC_OR_GE = 0x2b, + OPC2_32_RC_OR_GE_U = 0x2c, + OPC2_32_RC_OR_LT = 0x29, + OPC2_32_RC_OR_LT_U = 0x2a, + OPC2_32_RC_OR_NE = 0x28, + OPC2_32_RC_RSUB = 0x08, + OPC2_32_RC_RSUBS = 0x0a, + OPC2_32_RC_RSUBS_U = 0x0b, + OPC2_32_RC_SH_EQ = 0x37, + OPC2_32_RC_SH_GE = 0x3b, + OPC2_32_RC_SH_GE_U = 0x3c, + OPC2_32_RC_SH_LT = 0x39, + OPC2_32_RC_SH_LT_U = 0x3a, + OPC2_32_RC_SH_NE = 0x38, + OPC2_32_RC_XOR_EQ = 0x2f, + OPC2_32_RC_XOR_GE = 0x33, + OPC2_32_RC_XOR_GE_U = 0x34, + OPC2_32_RC_XOR_LT = 0x31, + OPC2_32_RC_XOR_LT_U = 0x32, + OPC2_32_RC_XOR_NE = 0x30, +}; +/* OPCM_32_RC_SERVICEROUTINE */ +enum { + OPC2_32_RC_BISR = 0x00, + OPC2_32_RC_SYSCALL = 0x04, +}; +/* OPCM_32_RC_MUL */ +enum { + OPC2_32_RC_MUL_32 = 0x01, + OPC2_32_RC_MUL_64 = 0x03, + OPC2_32_RC_MULS_32 = 0x05, + OPC2_32_RC_MUL_U_64 = 0x02, + OPC2_32_RC_MULS_U_32 = 0x04, +}; +/* + * RCPW Format + */ +/* OPCM_32_RCPW_MASK_INSERT */ +enum { + OPC2_32_RCPW_IMASK = 0x01, + OPC2_32_RCPW_INSERT = 0x00, +}; +/* + * RCR Format + */ +/* OPCM_32_RCR_COND_SELECT */ +enum { + OPC2_32_RCR_CADD = 0x00, + OPC2_32_RCR_CADDN = 0x01, + OPC2_32_RCR_SEL = 0x04, + OPC2_32_RCR_SELN = 0x05, +}; +/* OPCM_32_RCR_MADD */ +enum { + OPC2_32_RCR_MADD_32 = 0x01, + OPC2_32_RCR_MADD_64 = 0x03, + OPC2_32_RCR_MADDS_32 = 0x05, + OPC2_32_RCR_MADDS_64 = 0x07, + OPC2_32_RCR_MADD_U_64 = 0x02, + OPC2_32_RCR_MADDS_U_32 = 0x04, + OPC2_32_RCR_MADDS_U_64 = 0x06, +}; +/* OPCM_32_RCR_MSUB */ +enum { + OPC2_32_RCR_MSUB_32 = 0x01, + OPC2_32_RCR_MSUB_64 = 0x03, + OPC2_32_RCR_MSUBS_32 = 0x05, + OPC2_32_RCR_MSUBS_64 = 0x07, + OPC2_32_RCR_MSUB_U_64 = 0x02, + OPC2_32_RCR_MSUBS_U_32 = 0x04, + OPC2_32_RCR_MSUBS_U_64 = 0x06, +}; +/* + * RCRW Format + */ +/* OPCM_32_RCRW_MASK_INSERT */ +enum { + 
OPC2_32_RCRW_IMASK = 0x01, + OPC2_32_RCRW_INSERT = 0x00, +}; + +/* + * RR Format + */ +/* OPCM_32_RR_LOGICAL_SHIFT */ +enum { + OPC2_32_RR_AND = 0x08, + OPC2_32_RR_ANDN = 0x0e, + OPC2_32_RR_CLO = 0x1c, + OPC2_32_RR_CLO_H = 0x7d, + OPC2_32_RR_CLS = 0x1d, + OPC2_32_RR_CLS_H = 0x7e, + OPC2_32_RR_CLZ = 0x1b, + OPC2_32_RR_CLZ_H = 0x7c, + OPC2_32_RR_NAND = 0x09, + OPC2_32_RR_NOR = 0x0b, + OPC2_32_RR_OR = 0x0a, + OPC2_32_RR_ORN = 0x0f, + OPC2_32_RR_SH = 0x00, + OPC2_32_RR_SH_H = 0x40, + OPC2_32_RR_SHA = 0x01, + OPC2_32_RR_SHA_H = 0x41, + OPC2_32_RR_SHAS = 0x02, + OPC2_32_RR_XNOR = 0x0d, + OPC2_32_RR_XOR = 0x0c, +}; +/* OPCM_32_RR_ACCUMULATOR */ +enum { + OPC2_32_RR_ABS = 0x1c, + OPC2_32_RR_ABS_B = 0x5c, + OPC2_32_RR_ABS_H = 0x7c, + OPC2_32_RR_ABSDIF = 0x0e, + OPC2_32_RR_ABSDIF_B = 0x4e, + OPC2_32_RR_ABSDIF_H = 0x6e, + OPC2_32_RR_ABSDIFS = 0x0f, + OPC2_32_RR_ABSDIFS_H = 0x6f, + OPC2_32_RR_ABSS = 0x1d, + OPC2_32_RR_ABSS_H = 0x7d, + OPC2_32_RR_ADD = 0x00, + OPC2_32_RR_ADD_B = 0x40, + OPC2_32_RR_ADD_H = 0x60, + OPC2_32_RR_ADDC = 0x05, + OPC2_32_RR_ADDS = 0x02, + OPC2_32_RR_ADDS_H = 0x62, + OPC2_32_RR_ADDS_HU = 0x63, + OPC2_32_RR_ADDS_U = 0x03, + OPC2_32_RR_ADDX = 0x04, + OPC2_32_RR_AND_EQ = 0x20, + OPC2_32_RR_AND_GE = 0x24, + OPC2_32_RR_AND_GE_U = 0x25, + OPC2_32_RR_AND_LT = 0x22, + OPC2_32_RR_AND_LT_U = 0x23, + OPC2_32_RR_AND_NE = 0x21, + OPC2_32_RR_EQ = 0x10, + OPC2_32_RR_EQ_B = 0x50, + OPC2_32_RR_EQ_H = 0x70, + OPC2_32_RR_EQ_W = 0x90, + OPC2_32_RR_EQANY_B = 0x56, + OPC2_32_RR_EQANY_H = 0x76, + OPC2_32_RR_GE = 0x14, + OPC2_32_RR_GE_U = 0x15, + OPC2_32_RR_LT = 0x12, + OPC2_32_RR_LT_U = 0x13, + OPC2_32_RR_LT_B = 0x52, + OPC2_32_RR_LT_BU = 0x53, + OPC2_32_RR_LT_H = 0x72, + OPC2_32_RR_LT_HU = 0x73, + OPC2_32_RR_LT_W = 0x92, + OPC2_32_RR_LT_WU = 0x93, + OPC2_32_RR_MAX = 0x1a, + OPC2_32_RR_MAX_U = 0x1b, + OPC2_32_RR_MAX_B = 0x5a, + OPC2_32_RR_MAX_BU = 0x5b, + OPC2_32_RR_MAX_H = 0x7a, + OPC2_32_RR_MAX_HU = 0x7b, + OPC2_32_RR_MIN = 0x18, + OPC2_32_RR_MIN_U = 0x19, + 
OPC2_32_RR_MIN_B = 0x58, + OPC2_32_RR_MIN_BU = 0x59, + OPC2_32_RR_MIN_H = 0x78, + OPC2_32_RR_MIN_HU = 0x79, + OPC2_32_RR_MOV = 0x1f, + OPC2_32_RR_MOVS_64 = 0x80, + OPC2_32_RR_MOV_64 = 0x81, + OPC2_32_RR_NE = 0x11, + OPC2_32_RR_OR_EQ = 0x27, + OPC2_32_RR_OR_GE = 0x2b, + OPC2_32_RR_OR_GE_U = 0x2c, + OPC2_32_RR_OR_LT = 0x29, + OPC2_32_RR_OR_LT_U = 0x2a, + OPC2_32_RR_OR_NE = 0x28, + OPC2_32_RR_SAT_B = 0x5e, + OPC2_32_RR_SAT_BU = 0x5f, + OPC2_32_RR_SAT_H = 0x7e, + OPC2_32_RR_SAT_HU = 0x7f, + OPC2_32_RR_SH_EQ = 0x37, + OPC2_32_RR_SH_GE = 0x3b, + OPC2_32_RR_SH_GE_U = 0x3c, + OPC2_32_RR_SH_LT = 0x39, + OPC2_32_RR_SH_LT_U = 0x3a, + OPC2_32_RR_SH_NE = 0x38, + OPC2_32_RR_SUB = 0x08, + OPC2_32_RR_SUB_B = 0x48, + OPC2_32_RR_SUB_H = 0x68, + OPC2_32_RR_SUBC = 0x0d, + OPC2_32_RR_SUBS = 0x0a, + OPC2_32_RR_SUBS_U = 0x0b, + OPC2_32_RR_SUBS_H = 0x6a, + OPC2_32_RR_SUBS_HU = 0x6b, + OPC2_32_RR_SUBX = 0x0c, + OPC2_32_RR_XOR_EQ = 0x2f, + OPC2_32_RR_XOR_GE = 0x33, + OPC2_32_RR_XOR_GE_U = 0x34, + OPC2_32_RR_XOR_LT = 0x31, + OPC2_32_RR_XOR_LT_U = 0x32, + OPC2_32_RR_XOR_NE = 0x30, +}; +/* OPCM_32_RR_ADDRESS */ +enum { + OPC2_32_RR_ADD_A = 0x01, + OPC2_32_RR_ADDSC_A = 0x60, + OPC2_32_RR_ADDSC_AT = 0x62, + OPC2_32_RR_EQ_A = 0x40, + OPC2_32_RR_EQZ = 0x48, + OPC2_32_RR_GE_A = 0x43, + OPC2_32_RR_LT_A = 0x42, + OPC2_32_RR_MOV_A = 0x63, + OPC2_32_RR_MOV_AA = 0x00, + OPC2_32_RR_MOV_D = 0x4c, + OPC2_32_RR_NE_A = 0x41, + OPC2_32_RR_NEZ_A = 0x49, + OPC2_32_RR_SUB_A = 0x02, +}; +/* OPCM_32_RR_FLOAT */ +enum { + OPC2_32_RR_BMERGE = 0x01, + OPC2_32_RR_BSPLIT = 0x09, + OPC2_32_RR_DVINIT_B = 0x5a, + OPC2_32_RR_DVINIT_BU = 0x4a, + OPC2_32_RR_DVINIT_H = 0x3a, + OPC2_32_RR_DVINIT_HU = 0x2a, + OPC2_32_RR_DVINIT = 0x1a, + OPC2_32_RR_DVINIT_U = 0x0a, + OPC2_32_RR_PARITY = 0x02, + OPC2_32_RR_UNPACK = 0x08, + OPC2_32_RR_CRC32 = 0x03, + OPC2_32_RR_DIV = 0x20, + OPC2_32_RR_DIV_U = 0x21, + OPC2_32_RR_MUL_F = 0x04, + OPC2_32_RR_DIV_F = 0x05, + OPC2_32_RR_FTOI = 0x10, + OPC2_32_RR_ITOF = 0x14, + OPC2_32_RR_CMP_F = 0x00, 
+ OPC2_32_RR_FTOIZ = 0x13, + OPC2_32_RR_FTOQ31 = 0x11, + OPC2_32_RR_FTOQ31Z = 0x18, + OPC2_32_RR_FTOU = 0x12, + OPC2_32_RR_FTOUZ = 0x17, + OPC2_32_RR_Q31TOF = 0x15, + OPC2_32_RR_QSEED_F = 0x19, + OPC2_32_RR_UPDFL = 0x0c, + OPC2_32_RR_UTOF = 0x16, +}; +/* OPCM_32_RR_IDIRECT */ +enum { + OPC2_32_RR_JI = 0x03, + OPC2_32_RR_JLI = 0x02, + OPC2_32_RR_CALLI = 0x00, + OPC2_32_RR_FCALLI = 0x01, +}; +/* + * RR1 Format + */ +/* OPCM_32_RR1_MUL */ +enum { + OPC2_32_RR1_MUL_H_32_LL = 0x1a, + OPC2_32_RR1_MUL_H_32_LU = 0x19, + OPC2_32_RR1_MUL_H_32_UL = 0x18, + OPC2_32_RR1_MUL_H_32_UU = 0x1b, + OPC2_32_RR1_MULM_H_64_LL = 0x1e, + OPC2_32_RR1_MULM_H_64_LU = 0x1d, + OPC2_32_RR1_MULM_H_64_UL = 0x1c, + OPC2_32_RR1_MULM_H_64_UU = 0x1f, + OPC2_32_RR1_MULR_H_16_LL = 0x0e, + OPC2_32_RR1_MULR_H_16_LU = 0x0d, + OPC2_32_RR1_MULR_H_16_UL = 0x0c, + OPC2_32_RR1_MULR_H_16_UU = 0x0f, +}; +/* OPCM_32_RR1_MULQ */ +enum { + OPC2_32_RR1_MUL_Q_32 = 0x02, + OPC2_32_RR1_MUL_Q_64 = 0x1b, + OPC2_32_RR1_MUL_Q_32_L = 0x01, + OPC2_32_RR1_MUL_Q_64_L = 0x19, + OPC2_32_RR1_MUL_Q_32_U = 0x00, + OPC2_32_RR1_MUL_Q_64_U = 0x18, + OPC2_32_RR1_MUL_Q_32_LL = 0x05, + OPC2_32_RR1_MUL_Q_32_UU = 0x04, + OPC2_32_RR1_MULR_Q_32_L = 0x07, + OPC2_32_RR1_MULR_Q_32_U = 0x06, +}; +/* + * RR2 Format + */ +/* OPCM_32_RR2_MUL */ +enum { + OPC2_32_RR2_MUL_32 = 0x0a, + OPC2_32_RR2_MUL_64 = 0x6a, + OPC2_32_RR2_MULS_32 = 0x8a, + OPC2_32_RR2_MUL_U_64 = 0x68, + OPC2_32_RR2_MULS_U_32 = 0x88, +}; +/* + * RRPW Format + */ +/* OPCM_32_RRPW_EXTRACT_INSERT */ +enum { + + OPC2_32_RRPW_EXTR = 0x02, + OPC2_32_RRPW_EXTR_U = 0x03, + OPC2_32_RRPW_IMASK = 0x01, + OPC2_32_RRPW_INSERT = 0x00, +}; +/* + * RRR Format + */ +/* OPCM_32_RRR_COND_SELECT */ +enum { + OPC2_32_RRR_CADD = 0x00, + OPC2_32_RRR_CADDN = 0x01, + OPC2_32_RRR_CSUB = 0x02, + OPC2_32_RRR_CSUBN = 0x03, + OPC2_32_RRR_SEL = 0x04, + OPC2_32_RRR_SELN = 0x05, +}; +/* OPCM_32_RRR_FLOAT */ +enum { + OPC2_32_RRR_DVADJ = 0x0d, + OPC2_32_RRR_DVSTEP = 0x0f, + OPC2_32_RRR_DVSTEP_U = 0x0e, + 
OPC2_32_RRR_IXMAX = 0x0a, + OPC2_32_RRR_IXMAX_U = 0x0b, + OPC2_32_RRR_IXMIN = 0x08, + OPC2_32_RRR_IXMIN_U = 0x09, + OPC2_32_RRR_PACK = 0x00, + OPC2_32_RRR_ADD_F = 0x02, + OPC2_32_RRR_SUB_F = 0x03, + OPC2_32_RRR_MADD_F = 0x06, + OPC2_32_RRR_MSUB_F = 0x07, +}; +/* + * RRR1 Format + */ +/* OPCM_32_RRR1_MADD */ +enum { + OPC2_32_RRR1_MADD_H_LL = 0x1a, + OPC2_32_RRR1_MADD_H_LU = 0x19, + OPC2_32_RRR1_MADD_H_UL = 0x18, + OPC2_32_RRR1_MADD_H_UU = 0x1b, + OPC2_32_RRR1_MADDS_H_LL = 0x3a, + OPC2_32_RRR1_MADDS_H_LU = 0x39, + OPC2_32_RRR1_MADDS_H_UL = 0x38, + OPC2_32_RRR1_MADDS_H_UU = 0x3b, + OPC2_32_RRR1_MADDM_H_LL = 0x1e, + OPC2_32_RRR1_MADDM_H_LU = 0x1d, + OPC2_32_RRR1_MADDM_H_UL = 0x1c, + OPC2_32_RRR1_MADDM_H_UU = 0x1f, + OPC2_32_RRR1_MADDMS_H_LL = 0x3e, + OPC2_32_RRR1_MADDMS_H_LU = 0x3d, + OPC2_32_RRR1_MADDMS_H_UL = 0x3c, + OPC2_32_RRR1_MADDMS_H_UU = 0x3f, + OPC2_32_RRR1_MADDR_H_LL = 0x0e, + OPC2_32_RRR1_MADDR_H_LU = 0x0d, + OPC2_32_RRR1_MADDR_H_UL = 0x0c, + OPC2_32_RRR1_MADDR_H_UU = 0x0f, + OPC2_32_RRR1_MADDRS_H_LL = 0x2e, + OPC2_32_RRR1_MADDRS_H_LU = 0x2d, + OPC2_32_RRR1_MADDRS_H_UL = 0x2c, + OPC2_32_RRR1_MADDRS_H_UU = 0x2f, +}; +/* OPCM_32_RRR1_MADDQ_H */ +enum { + OPC2_32_RRR1_MADD_Q_32 = 0x02, + OPC2_32_RRR1_MADD_Q_64 = 0x1b, + OPC2_32_RRR1_MADD_Q_32_L = 0x01, + OPC2_32_RRR1_MADD_Q_64_L = 0x19, + OPC2_32_RRR1_MADD_Q_32_U = 0x00, + OPC2_32_RRR1_MADD_Q_64_U = 0x18, + OPC2_32_RRR1_MADD_Q_32_LL = 0x05, + OPC2_32_RRR1_MADD_Q_64_LL = 0x1d, + OPC2_32_RRR1_MADD_Q_32_UU = 0x04, + OPC2_32_RRR1_MADD_Q_64_UU = 0x1c, + OPC2_32_RRR1_MADDS_Q_32 = 0x22, + OPC2_32_RRR1_MADDS_Q_64 = 0x3b, + OPC2_32_RRR1_MADDS_Q_32_L = 0x21, + OPC2_32_RRR1_MADDS_Q_64_L = 0x39, + OPC2_32_RRR1_MADDS_Q_32_U = 0x20, + OPC2_32_RRR1_MADDS_Q_64_U = 0x38, + OPC2_32_RRR1_MADDS_Q_32_LL = 0x25, + OPC2_32_RRR1_MADDS_Q_64_LL = 0x3d, + OPC2_32_RRR1_MADDS_Q_32_UU = 0x24, + OPC2_32_RRR1_MADDS_Q_64_UU = 0x3c, + OPC2_32_RRR1_MADDR_H_64_UL = 0x1e, + OPC2_32_RRR1_MADDRS_H_64_UL = 0x3e, + OPC2_32_RRR1_MADDR_Q_32_LL = 0x07, 
+ OPC2_32_RRR1_MADDR_Q_32_UU = 0x06, + OPC2_32_RRR1_MADDRS_Q_32_LL = 0x27, + OPC2_32_RRR1_MADDRS_Q_32_UU = 0x26, +}; +/* OPCM_32_RRR1_MADDSU_H */ +enum { + OPC2_32_RRR1_MADDSU_H_32_LL = 0x1a, + OPC2_32_RRR1_MADDSU_H_32_LU = 0x19, + OPC2_32_RRR1_MADDSU_H_32_UL = 0x18, + OPC2_32_RRR1_MADDSU_H_32_UU = 0x1b, + OPC2_32_RRR1_MADDSUS_H_32_LL = 0x3a, + OPC2_32_RRR1_MADDSUS_H_32_LU = 0x39, + OPC2_32_RRR1_MADDSUS_H_32_UL = 0x38, + OPC2_32_RRR1_MADDSUS_H_32_UU = 0x3b, + OPC2_32_RRR1_MADDSUM_H_64_LL = 0x1e, + OPC2_32_RRR1_MADDSUM_H_64_LU = 0x1d, + OPC2_32_RRR1_MADDSUM_H_64_UL = 0x1c, + OPC2_32_RRR1_MADDSUM_H_64_UU = 0x1f, + OPC2_32_RRR1_MADDSUMS_H_64_LL = 0x3e, + OPC2_32_RRR1_MADDSUMS_H_64_LU = 0x3d, + OPC2_32_RRR1_MADDSUMS_H_64_UL = 0x3c, + OPC2_32_RRR1_MADDSUMS_H_64_UU = 0x3f, + OPC2_32_RRR1_MADDSUR_H_16_LL = 0x0e, + OPC2_32_RRR1_MADDSUR_H_16_LU = 0x0d, + OPC2_32_RRR1_MADDSUR_H_16_UL = 0x0c, + OPC2_32_RRR1_MADDSUR_H_16_UU = 0x0f, + OPC2_32_RRR1_MADDSURS_H_16_LL = 0x2e, + OPC2_32_RRR1_MADDSURS_H_16_LU = 0x2d, + OPC2_32_RRR1_MADDSURS_H_16_UL = 0x2c, + OPC2_32_RRR1_MADDSURS_H_16_UU = 0x2f, +}; +/* OPCM_32_RRR1_MSUB_H */ +enum { + OPC2_32_RRR1_MSUB_H_LL = 0x1a, + OPC2_32_RRR1_MSUB_H_LU = 0x19, + OPC2_32_RRR1_MSUB_H_UL = 0x18, + OPC2_32_RRR1_MSUB_H_UU = 0x1b, + OPC2_32_RRR1_MSUBS_H_LL = 0x3a, + OPC2_32_RRR1_MSUBS_H_LU = 0x39, + OPC2_32_RRR1_MSUBS_H_UL = 0x38, + OPC2_32_RRR1_MSUBS_H_UU = 0x3b, + OPC2_32_RRR1_MSUBM_H_LL = 0x1e, + OPC2_32_RRR1_MSUBM_H_LU = 0x1d, + OPC2_32_RRR1_MSUBM_H_UL = 0x1c, + OPC2_32_RRR1_MSUBM_H_UU = 0x1f, + OPC2_32_RRR1_MSUBMS_H_LL = 0x3e, + OPC2_32_RRR1_MSUBMS_H_LU = 0x3d, + OPC2_32_RRR1_MSUBMS_H_UL = 0x3c, + OPC2_32_RRR1_MSUBMS_H_UU = 0x3f, + OPC2_32_RRR1_MSUBR_H_LL = 0x0e, + OPC2_32_RRR1_MSUBR_H_LU = 0x0d, + OPC2_32_RRR1_MSUBR_H_UL = 0x0c, + OPC2_32_RRR1_MSUBR_H_UU = 0x0f, + OPC2_32_RRR1_MSUBRS_H_LL = 0x2e, + OPC2_32_RRR1_MSUBRS_H_LU = 0x2d, + OPC2_32_RRR1_MSUBRS_H_UL = 0x2c, + OPC2_32_RRR1_MSUBRS_H_UU = 0x2f, +}; +/* OPCM_32_RRR1_MSUB_Q */ +enum { + 
OPC2_32_RRR1_MSUB_Q_32 = 0x02, + OPC2_32_RRR1_MSUB_Q_64 = 0x1b, + OPC2_32_RRR1_MSUB_Q_32_L = 0x01, + OPC2_32_RRR1_MSUB_Q_64_L = 0x19, + OPC2_32_RRR1_MSUB_Q_32_U = 0x00, + OPC2_32_RRR1_MSUB_Q_64_U = 0x18, + OPC2_32_RRR1_MSUB_Q_32_LL = 0x05, + OPC2_32_RRR1_MSUB_Q_64_LL = 0x1d, + OPC2_32_RRR1_MSUB_Q_32_UU = 0x04, + OPC2_32_RRR1_MSUB_Q_64_UU = 0x1c, + OPC2_32_RRR1_MSUBS_Q_32 = 0x22, + OPC2_32_RRR1_MSUBS_Q_64 = 0x3b, + OPC2_32_RRR1_MSUBS_Q_32_L = 0x21, + OPC2_32_RRR1_MSUBS_Q_64_L = 0x39, + OPC2_32_RRR1_MSUBS_Q_32_U = 0x20, + OPC2_32_RRR1_MSUBS_Q_64_U = 0x38, + OPC2_32_RRR1_MSUBS_Q_32_LL = 0x25, + OPC2_32_RRR1_MSUBS_Q_64_LL = 0x3d, + OPC2_32_RRR1_MSUBS_Q_32_UU = 0x24, + OPC2_32_RRR1_MSUBS_Q_64_UU = 0x3c, + OPC2_32_RRR1_MSUBR_H_64_UL = 0x1e, + OPC2_32_RRR1_MSUBRS_H_64_UL = 0x3e, + OPC2_32_RRR1_MSUBR_Q_32_LL = 0x07, + OPC2_32_RRR1_MSUBR_Q_32_UU = 0x06, + OPC2_32_RRR1_MSUBRS_Q_32_LL = 0x27, + OPC2_32_RRR1_MSUBRS_Q_32_UU = 0x26, +}; +/* OPCM_32_RRR1_MSUBADS_H */ +enum { + OPC2_32_RRR1_MSUBAD_H_32_LL = 0x1a, + OPC2_32_RRR1_MSUBAD_H_32_LU = 0x19, + OPC2_32_RRR1_MSUBAD_H_32_UL = 0x18, + OPC2_32_RRR1_MSUBAD_H_32_UU = 0x1b, + OPC2_32_RRR1_MSUBADS_H_32_LL = 0x3a, + OPC2_32_RRR1_MSUBADS_H_32_LU = 0x39, + OPC2_32_RRR1_MSUBADS_H_32_UL = 0x38, + OPC2_32_RRR1_MSUBADS_H_32_UU = 0x3b, + OPC2_32_RRR1_MSUBADM_H_64_LL = 0x1e, + OPC2_32_RRR1_MSUBADM_H_64_LU = 0x1d, + OPC2_32_RRR1_MSUBADM_H_64_UL = 0x1c, + OPC2_32_RRR1_MSUBADM_H_64_UU = 0x1f, + OPC2_32_RRR1_MSUBADMS_H_64_LL = 0x3e, + OPC2_32_RRR1_MSUBADMS_H_64_LU = 0x3d, + OPC2_32_RRR1_MSUBADMS_H_64_UL = 0x3c, + OPC2_32_RRR1_MSUBADMS_H_64_UU = 0x3f, + OPC2_32_RRR1_MSUBADR_H_16_LL = 0x0e, + OPC2_32_RRR1_MSUBADR_H_16_LU = 0x0d, + OPC2_32_RRR1_MSUBADR_H_16_UL = 0x0c, + OPC2_32_RRR1_MSUBADR_H_16_UU = 0x0f, + OPC2_32_RRR1_MSUBADRS_H_16_LL = 0x2e, + OPC2_32_RRR1_MSUBADRS_H_16_LU = 0x2d, + OPC2_32_RRR1_MSUBADRS_H_16_UL = 0x2c, + OPC2_32_RRR1_MSUBADRS_H_16_UU = 0x2f, +}; +/* + * RRR2 Format + */ +/* OPCM_32_RRR2_MADD */ +enum { + OPC2_32_RRR2_MADD_32 
= 0x0a, + OPC2_32_RRR2_MADD_64 = 0x6a, + OPC2_32_RRR2_MADDS_32 = 0x8a, + OPC2_32_RRR2_MADDS_64 = 0xea, + OPC2_32_RRR2_MADD_U_64 = 0x68, + OPC2_32_RRR2_MADDS_U_32 = 0x88, + OPC2_32_RRR2_MADDS_U_64 = 0xe8, +}; +/* OPCM_32_RRR2_MSUB */ +enum { + OPC2_32_RRR2_MSUB_32 = 0x0a, + OPC2_32_RRR2_MSUB_64 = 0x6a, + OPC2_32_RRR2_MSUBS_32 = 0x8a, + OPC2_32_RRR2_MSUBS_64 = 0xea, + OPC2_32_RRR2_MSUB_U_64 = 0x68, + OPC2_32_RRR2_MSUBS_U_32 = 0x88, + OPC2_32_RRR2_MSUBS_U_64 = 0xe8, +}; +/* + * RRRR Format + */ +/* OPCM_32_RRRR_EXTRACT_INSERT */ +enum { + OPC2_32_RRRR_DEXTR = 0x04, + OPC2_32_RRRR_EXTR = 0x02, + OPC2_32_RRRR_EXTR_U = 0x03, + OPC2_32_RRRR_INSERT = 0x00, +}; +/* + * RRRW Format + */ +/* OPCM_32_RRRW_EXTRACT_INSERT */ +enum { + OPC2_32_RRRW_EXTR = 0x02, + OPC2_32_RRRW_EXTR_U = 0x03, + OPC2_32_RRRW_IMASK = 0x01, + OPC2_32_RRRW_INSERT = 0x00, +}; +/* + * SYS Format + */ +/* OPCM_32_SYS_INTERRUPTS */ +enum { + OPC2_32_SYS_DEBUG = 0x04, + OPC2_32_SYS_DISABLE = 0x0d, + OPC2_32_SYS_DSYNC = 0x12, + OPC2_32_SYS_ENABLE = 0x0c, + OPC2_32_SYS_ISYNC = 0x13, + OPC2_32_SYS_NOP = 0x00, + OPC2_32_SYS_RET = 0x06, + OPC2_32_SYS_RFE = 0x07, + OPC2_32_SYS_RFM = 0x05, + OPC2_32_SYS_RSLCX = 0x09, + OPC2_32_SYS_SVLCX = 0x08, + OPC2_32_SYS_TRAPSV = 0x15, + OPC2_32_SYS_TRAPV = 0x14, + OPC2_32_SYS_RESTORE = 0x0e, + OPC2_32_SYS_FRET = 0x03, +}; + +#endif diff --git a/qemu/target/tricore/unicorn.c b/qemu/target/tricore/unicorn.c new file mode 100644 index 00000000..f217335f --- /dev/null +++ b/qemu/target/tricore/unicorn.c @@ -0,0 +1,270 @@ +/* Unicorn Emulator Engine */ +/* By Nguyen Anh Quynh , 2015 */ + +/* + Created for Unicorn Engine by Eric Poole , 2022 + Copyright 2022 Aptiv +*/ + +#include "qemu/typedefs.h" +#include "unicorn/unicorn.h" +#include "sysemu/cpus.h" +#include "sysemu/tcg.h" +#include "cpu.h" +#include "uc_priv.h" +#include "unicorn_common.h" +#include "unicorn.h" + +TriCoreCPU *cpu_tricore_init(struct uc_struct *uc); + +void tricore_set_pc(struct uc_struct *uc, uint64_t address) 
+{ + ((CPUTriCoreState *)uc->cpu->env_ptr)->PC = address; +} + +void tricore_reg_reset(struct uc_struct *uc) +{ + CPUTriCoreState *env; + (void)uc; + + env = uc->cpu->env_ptr; + memset(env->gpr_a, 0, sizeof(env->gpr_a)); + memset(env->gpr_d, 0, sizeof(env->gpr_d)); + + env->PC = 0; +} + +static void reg_read(CPUTriCoreState *env, unsigned int regid, void *value) +{ + if (regid >= UC_TRICORE_REG_A0 && regid <= UC_TRICORE_REG_A9) + *(int32_t *)value = env->gpr_a[regid - UC_TRICORE_REG_A0]; + if (regid >= UC_TRICORE_REG_A12 && regid <= UC_TRICORE_REG_A15) + *(int32_t *)value = env->gpr_a[regid - UC_TRICORE_REG_A0]; + else if (regid >= UC_TRICORE_REG_D0 && regid <= UC_TRICORE_REG_D15) + *(int32_t *)value = env->gpr_d[regid - UC_TRICORE_REG_D0]; + else { + switch (regid) { + // case UC_TRICORE_REG_SP: + case UC_TRICORE_REG_A10: + *(int32_t *)value = env->gpr_a[10]; + break; + // case UC_TRICORE_REG_LR: + case UC_TRICORE_REG_A11: + *(int32_t *)value = env->gpr_a[11]; + break; + case UC_TRICORE_REG_PC: + *(int32_t *)value = env->PC; + break; + case UC_TRICORE_REG_PCXI: + *(int32_t *)value = env->PCXI; + break; + case UC_TRICORE_REG_PSW: + *(int32_t *)value = env->PSW; + break; + case UC_TRICORE_REG_PSW_USB_C: + *(int32_t *)value = env->PSW_USB_C; + break; + case UC_TRICORE_REG_PSW_USB_V: + *(int32_t *)value = env->PSW_USB_V; + break; + case UC_TRICORE_REG_PSW_USB_SV: + *(int32_t *)value = env->PSW_USB_SV; + break; + case UC_TRICORE_REG_PSW_USB_AV: + *(int32_t *)value = env->PSW_USB_AV; + break; + case UC_TRICORE_REG_PSW_USB_SAV: + *(int32_t *)value = env->PSW_USB_SAV; + break; + case UC_TRICORE_REG_SYSCON: + *(int32_t *)value = env->SYSCON; + break; + case UC_TRICORE_REG_CPU_ID: + *(int32_t *)value = env->CPU_ID; + break; + case UC_TRICORE_REG_BIV: + *(int32_t *)value = env->BIV; + break; + case UC_TRICORE_REG_BTV: + *(int32_t *)value = env->BTV; + break; + case UC_TRICORE_REG_ISP: + *(int32_t *)value = env->ISP; + break; + case UC_TRICORE_REG_ICR: + *(int32_t *)value = 
env->ICR; + break; + case UC_TRICORE_REG_FCX: + *(int32_t *)value = env->FCX; + break; + case UC_TRICORE_REG_LCX: + *(int32_t *)value = env->LCX; + break; + case UC_TRICORE_REG_COMPAT: + *(int32_t *)value = env->COMPAT; + break; + } + } +} + +int tricore_reg_read(struct uc_struct *uc, unsigned int *regs, void **vals, + int count) +{ + CPUTriCoreState *env = &(TRICORE_CPU(uc->cpu)->env); + int i; + + for (i = 0; i < count; i++) { + unsigned int regid = regs[i]; + void *value = vals[i]; + reg_read(env, regid, value); + } + + return 0; +} + +int tricore_context_reg_read(struct uc_context *uc, unsigned int *regs, + void **vals, int count) +{ + CPUTriCoreState *env = (CPUTriCoreState *)uc->data; + int i; + + for (i = 0; i < count; i++) { + unsigned int regid = regs[i]; + void *value = vals[i]; + reg_read(env, regid, value); + } + + return 0; +} + +static void reg_write(CPUTriCoreState *env, unsigned int regid, + const void *value) +{ + if (regid >= UC_TRICORE_REG_A0 && regid <= UC_TRICORE_REG_A9) + env->gpr_a[regid - UC_TRICORE_REG_A0] = *(int32_t *)value; + if (regid >= UC_TRICORE_REG_A12 && regid <= UC_TRICORE_REG_A15) + env->gpr_a[regid - UC_TRICORE_REG_A0] = *(int32_t *)value; + else if (regid >= UC_TRICORE_REG_D0 && regid <= UC_TRICORE_REG_D15) + env->gpr_d[regid - UC_TRICORE_REG_D0] = *(int32_t *)value; + else { + switch (regid) { + // case UC_TRICORE_REG_SP: + case UC_TRICORE_REG_A10: + env->gpr_a[10] = *(int32_t *)value; + break; + // case UC_TRICORE_REG_LR: + case UC_TRICORE_REG_A11: + env->gpr_a[11] = *(int32_t *)value; + break; + case UC_TRICORE_REG_PC: + env->PC = *(int32_t *)value; + break; + case UC_TRICORE_REG_PCXI: + env->PCXI = *(int32_t *)value; + break; + case UC_TRICORE_REG_PSW: + env->PSW = *(int32_t *)value; + break; + case UC_TRICORE_REG_PSW_USB_C: + env->PSW_USB_C = *(int32_t *)value; + break; + case UC_TRICORE_REG_PSW_USB_V: + env->PSW_USB_V = *(int32_t *)value; + break; + case UC_TRICORE_REG_PSW_USB_SV: + env->PSW_USB_SV = *(int32_t *)value; + 
break; + case UC_TRICORE_REG_PSW_USB_AV: + env->PSW_USB_AV = *(int32_t *)value; + break; + case UC_TRICORE_REG_PSW_USB_SAV: + env->PSW_USB_SAV = *(int32_t *)value; + break; + case UC_TRICORE_REG_SYSCON: + env->SYSCON = *(int32_t *)value; + break; + case UC_TRICORE_REG_CPU_ID: + env->CPU_ID = *(int32_t *)value; + break; + case UC_TRICORE_REG_BIV: + env->BIV = *(int32_t *)value; + break; + case UC_TRICORE_REG_BTV: + env->BTV = *(int32_t *)value; + break; + case UC_TRICORE_REG_ISP: + env->ISP = *(int32_t *)value; + break; + case UC_TRICORE_REG_ICR: + env->ICR = *(int32_t *)value; + break; + case UC_TRICORE_REG_FCX: + env->FCX = *(int32_t *)value; + break; + case UC_TRICORE_REG_LCX: + env->LCX = *(int32_t *)value; + break; + case UC_TRICORE_REG_COMPAT: + env->COMPAT = *(int32_t *)value; + break; + } + } +} + +int tricore_reg_write(struct uc_struct *uc, unsigned int *regs, + void *const *vals, int count) +{ + CPUTriCoreState *env = &(TRICORE_CPU(uc->cpu)->env); + int i; + + for (i = 0; i < count; i++) { + unsigned int regid = regs[i]; + void *value = vals[i]; + reg_write(env, regid, value); + if (regid == UC_TRICORE_REG_PC) { + // force to quit execution and flush TB + uc->quit_request = true; + uc_emu_stop(uc); + } + } + + return 0; +} + +int tricore_context_reg_write(struct uc_context *uc, unsigned int *regs, + void *const *vals, int count) +{ + CPUTriCoreState *env = (CPUTriCoreState *)uc->data; + int i; + + for (i = 0; i < count; i++) { + unsigned int regid = regs[i]; + const void *value = vals[i]; + reg_write(env, regid, value); + } + + return 0; +} + +static int tricore_cpus_init(struct uc_struct *uc, const char *cpu_model) +{ + TriCoreCPU *cpu; + + cpu = cpu_tricore_init(uc); + if (cpu == NULL) { + return -1; + } + + return 0; +} + +void tricore_uc_init(struct uc_struct *uc) +{ + uc->reg_read = tricore_reg_read; + uc->reg_write = tricore_reg_write; + uc->reg_reset = tricore_reg_reset; + uc->set_pc = tricore_set_pc; + uc->cpus_init = tricore_cpus_init; + 
uc->cpu_context_size = offsetof(CPUTriCoreState, end_reset_fields); + uc_common_init(uc); +} \ No newline at end of file diff --git a/qemu/target/tricore/unicorn.h b/qemu/target/tricore/unicorn.h new file mode 100644 index 00000000..36d30ee7 --- /dev/null +++ b/qemu/target/tricore/unicorn.h @@ -0,0 +1,27 @@ +/* Unicorn Emulator Engine */ +/* By Nguyen Anh Quynh , 2015 */ + +/* + Modified for Unicorn Engine by Eric Poole , 2022 + Copyright 2022 Aptiv +*/ + +#ifndef UC_QEMU_TARGET_TRICORE_H +#define UC_QEMU_TARGET_TRICORE_H + +// functions to read & write registers +int tricore_reg_read(struct uc_struct *uc, unsigned int *regs, void **vals, + int count); +int tricore_reg_write(struct uc_struct *uc, unsigned int *regs, + void *const *vals, int count); + +int tricore_context_reg_read(struct uc_context *uc, unsigned int *regs, + void **vals, int count); +int tricore_context_reg_write(struct uc_context *uc, unsigned int *regs, + void *const *vals, int count); + +void tricore_reg_reset(struct uc_struct *uc); + +void tricore_uc_init(struct uc_struct *uc); + +#endif diff --git a/qemu/tricore.h b/qemu/tricore.h new file mode 100644 index 00000000..6603175f --- /dev/null +++ b/qemu/tricore.h @@ -0,0 +1,1289 @@ +/* Autogen header for Unicorn Engine - DONOT MODIFY */ +#ifndef UNICORN_AUTOGEN_tricore_H +#define UNICORN_AUTOGEN_tricore_H +#ifndef UNICORN_ARCH_POSTFIX +#define UNICORN_ARCH_POSTFIX _tricore +#endif +#define uc_add_inline_hook uc_add_inline_hook_tricore +#define uc_del_inline_hook uc_del_inline_hook_tricore +#define tb_invalidate_phys_range tb_invalidate_phys_range_tricore +#define use_idiv_instructions use_idiv_instructions_tricore +#define arm_arch arm_arch_tricore +#define tb_target_set_jmp_target tb_target_set_jmp_target_tricore +#define have_bmi1 have_bmi1_tricore +#define have_popcnt have_popcnt_tricore +#define have_avx1 have_avx1_tricore +#define have_avx2 have_avx2_tricore +#define have_isa have_isa_tricore +#define have_altivec have_altivec_tricore 
+#define have_vsx have_vsx_tricore +#define flush_icache_range flush_icache_range_tricore +#define s390_facilities s390_facilities_tricore +#define tcg_dump_op tcg_dump_op_tricore +#define tcg_dump_ops tcg_dump_ops_tricore +#define tcg_gen_and_i64 tcg_gen_and_i64_tricore +#define tcg_gen_discard_i64 tcg_gen_discard_i64_tricore +#define tcg_gen_ld16s_i64 tcg_gen_ld16s_i64_tricore +#define tcg_gen_ld16u_i64 tcg_gen_ld16u_i64_tricore +#define tcg_gen_ld32s_i64 tcg_gen_ld32s_i64_tricore +#define tcg_gen_ld32u_i64 tcg_gen_ld32u_i64_tricore +#define tcg_gen_ld8s_i64 tcg_gen_ld8s_i64_tricore +#define tcg_gen_ld8u_i64 tcg_gen_ld8u_i64_tricore +#define tcg_gen_ld_i64 tcg_gen_ld_i64_tricore +#define tcg_gen_mov_i64 tcg_gen_mov_i64_tricore +#define tcg_gen_movi_i64 tcg_gen_movi_i64_tricore +#define tcg_gen_mul_i64 tcg_gen_mul_i64_tricore +#define tcg_gen_or_i64 tcg_gen_or_i64_tricore +#define tcg_gen_sar_i64 tcg_gen_sar_i64_tricore +#define tcg_gen_shl_i64 tcg_gen_shl_i64_tricore +#define tcg_gen_shr_i64 tcg_gen_shr_i64_tricore +#define tcg_gen_st_i64 tcg_gen_st_i64_tricore +#define tcg_gen_xor_i64 tcg_gen_xor_i64_tricore +#define cpu_icount_to_ns cpu_icount_to_ns_tricore +#define cpu_is_stopped cpu_is_stopped_tricore +#define cpu_get_ticks cpu_get_ticks_tricore +#define cpu_get_clock cpu_get_clock_tricore +#define cpu_resume cpu_resume_tricore +#define qemu_init_vcpu qemu_init_vcpu_tricore +#define cpu_stop_current cpu_stop_current_tricore +#define resume_all_vcpus resume_all_vcpus_tricore +#define vm_start vm_start_tricore +#define address_space_dispatch_compact address_space_dispatch_compact_tricore +#define flatview_translate flatview_translate_tricore +#define address_space_translate_for_iotlb address_space_translate_for_iotlb_tricore +#define qemu_get_cpu qemu_get_cpu_tricore +#define cpu_address_space_init cpu_address_space_init_tricore +#define cpu_get_address_space cpu_get_address_space_tricore +#define cpu_exec_unrealizefn cpu_exec_unrealizefn_tricore +#define 
cpu_exec_initfn cpu_exec_initfn_tricore +#define cpu_exec_realizefn cpu_exec_realizefn_tricore +#define tb_invalidate_phys_addr tb_invalidate_phys_addr_tricore +#define cpu_watchpoint_insert cpu_watchpoint_insert_tricore +#define cpu_watchpoint_remove_by_ref cpu_watchpoint_remove_by_ref_tricore +#define cpu_watchpoint_remove_all cpu_watchpoint_remove_all_tricore +#define cpu_watchpoint_address_matches cpu_watchpoint_address_matches_tricore +#define cpu_breakpoint_insert cpu_breakpoint_insert_tricore +#define cpu_breakpoint_remove cpu_breakpoint_remove_tricore +#define cpu_breakpoint_remove_by_ref cpu_breakpoint_remove_by_ref_tricore +#define cpu_breakpoint_remove_all cpu_breakpoint_remove_all_tricore +#define cpu_abort cpu_abort_tricore +#define cpu_physical_memory_test_and_clear_dirty cpu_physical_memory_test_and_clear_dirty_tricore +#define memory_region_section_get_iotlb memory_region_section_get_iotlb_tricore +#define flatview_add_to_dispatch flatview_add_to_dispatch_tricore +#define qemu_ram_get_host_addr qemu_ram_get_host_addr_tricore +#define qemu_ram_get_offset qemu_ram_get_offset_tricore +#define qemu_ram_get_used_length qemu_ram_get_used_length_tricore +#define qemu_ram_is_shared qemu_ram_is_shared_tricore +#define qemu_ram_pagesize qemu_ram_pagesize_tricore +#define qemu_ram_alloc_from_ptr qemu_ram_alloc_from_ptr_tricore +#define qemu_ram_alloc qemu_ram_alloc_tricore +#define qemu_ram_free qemu_ram_free_tricore +#define qemu_map_ram_ptr qemu_map_ram_ptr_tricore +#define qemu_ram_block_host_offset qemu_ram_block_host_offset_tricore +#define qemu_ram_block_from_host qemu_ram_block_from_host_tricore +#define qemu_ram_addr_from_host qemu_ram_addr_from_host_tricore +#define cpu_check_watchpoint cpu_check_watchpoint_tricore +#define iotlb_to_section iotlb_to_section_tricore +#define address_space_dispatch_new address_space_dispatch_new_tricore +#define address_space_dispatch_free address_space_dispatch_free_tricore +#define flatview_read_continue 
flatview_read_continue_tricore +#define address_space_read_full address_space_read_full_tricore +#define address_space_write address_space_write_tricore +#define address_space_rw address_space_rw_tricore +#define cpu_physical_memory_rw cpu_physical_memory_rw_tricore +#define address_space_write_rom address_space_write_rom_tricore +#define cpu_flush_icache_range cpu_flush_icache_range_tricore +#define cpu_exec_init_all cpu_exec_init_all_tricore +#define address_space_access_valid address_space_access_valid_tricore +#define address_space_map address_space_map_tricore +#define address_space_unmap address_space_unmap_tricore +#define cpu_physical_memory_map cpu_physical_memory_map_tricore +#define cpu_physical_memory_unmap cpu_physical_memory_unmap_tricore +#define cpu_memory_rw_debug cpu_memory_rw_debug_tricore +#define qemu_target_page_size qemu_target_page_size_tricore +#define qemu_target_page_bits qemu_target_page_bits_tricore +#define qemu_target_page_bits_min qemu_target_page_bits_min_tricore +#define target_words_bigendian target_words_bigendian_tricore +#define cpu_physical_memory_is_io cpu_physical_memory_is_io_tricore +#define ram_block_discard_range ram_block_discard_range_tricore +#define ramblock_is_pmem ramblock_is_pmem_tricore +#define page_size_init page_size_init_tricore +#define set_preferred_target_page_bits set_preferred_target_page_bits_tricore +#define finalize_target_page_bits finalize_target_page_bits_tricore +#define cpu_outb cpu_outb_tricore +#define cpu_outw cpu_outw_tricore +#define cpu_outl cpu_outl_tricore +#define cpu_inb cpu_inb_tricore +#define cpu_inw cpu_inw_tricore +#define cpu_inl cpu_inl_tricore +#define memory_map memory_map_tricore +#define memory_map_io memory_map_io_tricore +#define memory_map_ptr memory_map_ptr_tricore +#define memory_unmap memory_unmap_tricore +#define memory_free memory_free_tricore +#define flatview_unref flatview_unref_tricore +#define address_space_get_flatview address_space_get_flatview_tricore +#define 
memory_region_transaction_begin memory_region_transaction_begin_tricore +#define memory_region_transaction_commit memory_region_transaction_commit_tricore +#define memory_region_init memory_region_init_tricore +#define memory_region_access_valid memory_region_access_valid_tricore +#define memory_region_dispatch_read memory_region_dispatch_read_tricore +#define memory_region_dispatch_write memory_region_dispatch_write_tricore +#define memory_region_init_io memory_region_init_io_tricore +#define memory_region_init_ram_ptr memory_region_init_ram_ptr_tricore +#define memory_region_size memory_region_size_tricore +#define memory_region_set_readonly memory_region_set_readonly_tricore +#define memory_region_get_ram_ptr memory_region_get_ram_ptr_tricore +#define memory_region_from_host memory_region_from_host_tricore +#define memory_region_get_ram_addr memory_region_get_ram_addr_tricore +#define memory_region_add_subregion memory_region_add_subregion_tricore +#define memory_region_del_subregion memory_region_del_subregion_tricore +#define memory_region_find memory_region_find_tricore +#define memory_listener_register memory_listener_register_tricore +#define memory_listener_unregister memory_listener_unregister_tricore +#define address_space_remove_listeners address_space_remove_listeners_tricore +#define address_space_init address_space_init_tricore +#define address_space_destroy address_space_destroy_tricore +#define memory_region_init_ram memory_region_init_ram_tricore +#define memory_mapping_list_add_merge_sorted memory_mapping_list_add_merge_sorted_tricore +#define exec_inline_op exec_inline_op_tricore +#define floatx80_default_nan floatx80_default_nan_tricore +#define float_raise float_raise_tricore +#define float16_is_quiet_nan float16_is_quiet_nan_tricore +#define float16_is_signaling_nan float16_is_signaling_nan_tricore +#define float32_is_quiet_nan float32_is_quiet_nan_tricore +#define float32_is_signaling_nan float32_is_signaling_nan_tricore +#define 
float64_is_quiet_nan float64_is_quiet_nan_tricore +#define float64_is_signaling_nan float64_is_signaling_nan_tricore +#define floatx80_is_quiet_nan floatx80_is_quiet_nan_tricore +#define floatx80_is_signaling_nan floatx80_is_signaling_nan_tricore +#define floatx80_silence_nan floatx80_silence_nan_tricore +#define propagateFloatx80NaN propagateFloatx80NaN_tricore +#define float128_is_quiet_nan float128_is_quiet_nan_tricore +#define float128_is_signaling_nan float128_is_signaling_nan_tricore +#define float128_silence_nan float128_silence_nan_tricore +#define float16_add float16_add_tricore +#define float16_sub float16_sub_tricore +#define float32_add float32_add_tricore +#define float32_sub float32_sub_tricore +#define float64_add float64_add_tricore +#define float64_sub float64_sub_tricore +#define float16_mul float16_mul_tricore +#define float32_mul float32_mul_tricore +#define float64_mul float64_mul_tricore +#define float16_muladd float16_muladd_tricore +#define float32_muladd float32_muladd_tricore +#define float64_muladd float64_muladd_tricore +#define float16_div float16_div_tricore +#define float32_div float32_div_tricore +#define float64_div float64_div_tricore +#define float16_to_float32 float16_to_float32_tricore +#define float16_to_float64 float16_to_float64_tricore +#define float32_to_float16 float32_to_float16_tricore +#define float32_to_float64 float32_to_float64_tricore +#define float64_to_float16 float64_to_float16_tricore +#define float64_to_float32 float64_to_float32_tricore +#define float16_round_to_int float16_round_to_int_tricore +#define float32_round_to_int float32_round_to_int_tricore +#define float64_round_to_int float64_round_to_int_tricore +#define float16_to_int16_scalbn float16_to_int16_scalbn_tricore +#define float16_to_int32_scalbn float16_to_int32_scalbn_tricore +#define float16_to_int64_scalbn float16_to_int64_scalbn_tricore +#define float32_to_int16_scalbn float32_to_int16_scalbn_tricore +#define float32_to_int32_scalbn 
float32_to_int32_scalbn_tricore +#define float32_to_int64_scalbn float32_to_int64_scalbn_tricore +#define float64_to_int16_scalbn float64_to_int16_scalbn_tricore +#define float64_to_int32_scalbn float64_to_int32_scalbn_tricore +#define float64_to_int64_scalbn float64_to_int64_scalbn_tricore +#define float16_to_int16 float16_to_int16_tricore +#define float16_to_int32 float16_to_int32_tricore +#define float16_to_int64 float16_to_int64_tricore +#define float32_to_int16 float32_to_int16_tricore +#define float32_to_int32 float32_to_int32_tricore +#define float32_to_int64 float32_to_int64_tricore +#define float64_to_int16 float64_to_int16_tricore +#define float64_to_int32 float64_to_int32_tricore +#define float64_to_int64 float64_to_int64_tricore +#define float16_to_int16_round_to_zero float16_to_int16_round_to_zero_tricore +#define float16_to_int32_round_to_zero float16_to_int32_round_to_zero_tricore +#define float16_to_int64_round_to_zero float16_to_int64_round_to_zero_tricore +#define float32_to_int16_round_to_zero float32_to_int16_round_to_zero_tricore +#define float32_to_int32_round_to_zero float32_to_int32_round_to_zero_tricore +#define float32_to_int64_round_to_zero float32_to_int64_round_to_zero_tricore +#define float64_to_int16_round_to_zero float64_to_int16_round_to_zero_tricore +#define float64_to_int32_round_to_zero float64_to_int32_round_to_zero_tricore +#define float64_to_int64_round_to_zero float64_to_int64_round_to_zero_tricore +#define float16_to_uint16_scalbn float16_to_uint16_scalbn_tricore +#define float16_to_uint32_scalbn float16_to_uint32_scalbn_tricore +#define float16_to_uint64_scalbn float16_to_uint64_scalbn_tricore +#define float32_to_uint16_scalbn float32_to_uint16_scalbn_tricore +#define float32_to_uint32_scalbn float32_to_uint32_scalbn_tricore +#define float32_to_uint64_scalbn float32_to_uint64_scalbn_tricore +#define float64_to_uint16_scalbn float64_to_uint16_scalbn_tricore +#define float64_to_uint32_scalbn float64_to_uint32_scalbn_tricore 
+#define float64_to_uint64_scalbn float64_to_uint64_scalbn_tricore +#define float16_to_uint16 float16_to_uint16_tricore +#define float16_to_uint32 float16_to_uint32_tricore +#define float16_to_uint64 float16_to_uint64_tricore +#define float32_to_uint16 float32_to_uint16_tricore +#define float32_to_uint32 float32_to_uint32_tricore +#define float32_to_uint64 float32_to_uint64_tricore +#define float64_to_uint16 float64_to_uint16_tricore +#define float64_to_uint32 float64_to_uint32_tricore +#define float64_to_uint64 float64_to_uint64_tricore +#define float16_to_uint16_round_to_zero float16_to_uint16_round_to_zero_tricore +#define float16_to_uint32_round_to_zero float16_to_uint32_round_to_zero_tricore +#define float16_to_uint64_round_to_zero float16_to_uint64_round_to_zero_tricore +#define float32_to_uint16_round_to_zero float32_to_uint16_round_to_zero_tricore +#define float32_to_uint32_round_to_zero float32_to_uint32_round_to_zero_tricore +#define float32_to_uint64_round_to_zero float32_to_uint64_round_to_zero_tricore +#define float64_to_uint16_round_to_zero float64_to_uint16_round_to_zero_tricore +#define float64_to_uint32_round_to_zero float64_to_uint32_round_to_zero_tricore +#define float64_to_uint64_round_to_zero float64_to_uint64_round_to_zero_tricore +#define int64_to_float16_scalbn int64_to_float16_scalbn_tricore +#define int32_to_float16_scalbn int32_to_float16_scalbn_tricore +#define int16_to_float16_scalbn int16_to_float16_scalbn_tricore +#define int64_to_float16 int64_to_float16_tricore +#define int32_to_float16 int32_to_float16_tricore +#define int16_to_float16 int16_to_float16_tricore +#define int64_to_float32_scalbn int64_to_float32_scalbn_tricore +#define int32_to_float32_scalbn int32_to_float32_scalbn_tricore +#define int16_to_float32_scalbn int16_to_float32_scalbn_tricore +#define int64_to_float32 int64_to_float32_tricore +#define int32_to_float32 int32_to_float32_tricore +#define int16_to_float32 int16_to_float32_tricore +#define 
int64_to_float64_scalbn int64_to_float64_scalbn_tricore +#define int32_to_float64_scalbn int32_to_float64_scalbn_tricore +#define int16_to_float64_scalbn int16_to_float64_scalbn_tricore +#define int64_to_float64 int64_to_float64_tricore +#define int32_to_float64 int32_to_float64_tricore +#define int16_to_float64 int16_to_float64_tricore +#define uint64_to_float16_scalbn uint64_to_float16_scalbn_tricore +#define uint32_to_float16_scalbn uint32_to_float16_scalbn_tricore +#define uint16_to_float16_scalbn uint16_to_float16_scalbn_tricore +#define uint64_to_float16 uint64_to_float16_tricore +#define uint32_to_float16 uint32_to_float16_tricore +#define uint16_to_float16 uint16_to_float16_tricore +#define uint64_to_float32_scalbn uint64_to_float32_scalbn_tricore +#define uint32_to_float32_scalbn uint32_to_float32_scalbn_tricore +#define uint16_to_float32_scalbn uint16_to_float32_scalbn_tricore +#define uint64_to_float32 uint64_to_float32_tricore +#define uint32_to_float32 uint32_to_float32_tricore +#define uint16_to_float32 uint16_to_float32_tricore +#define uint64_to_float64_scalbn uint64_to_float64_scalbn_tricore +#define uint32_to_float64_scalbn uint32_to_float64_scalbn_tricore +#define uint16_to_float64_scalbn uint16_to_float64_scalbn_tricore +#define uint64_to_float64 uint64_to_float64_tricore +#define uint32_to_float64 uint32_to_float64_tricore +#define uint16_to_float64 uint16_to_float64_tricore +#define float16_min float16_min_tricore +#define float16_minnum float16_minnum_tricore +#define float16_minnummag float16_minnummag_tricore +#define float16_max float16_max_tricore +#define float16_maxnum float16_maxnum_tricore +#define float16_maxnummag float16_maxnummag_tricore +#define float32_min float32_min_tricore +#define float32_minnum float32_minnum_tricore +#define float32_minnummag float32_minnummag_tricore +#define float32_max float32_max_tricore +#define float32_maxnum float32_maxnum_tricore +#define float32_maxnummag float32_maxnummag_tricore +#define 
float64_min float64_min_tricore +#define float64_minnum float64_minnum_tricore +#define float64_minnummag float64_minnummag_tricore +#define float64_max float64_max_tricore +#define float64_maxnum float64_maxnum_tricore +#define float64_maxnummag float64_maxnummag_tricore +#define float16_compare float16_compare_tricore +#define float16_compare_quiet float16_compare_quiet_tricore +#define float32_compare float32_compare_tricore +#define float32_compare_quiet float32_compare_quiet_tricore +#define float64_compare float64_compare_tricore +#define float64_compare_quiet float64_compare_quiet_tricore +#define float16_scalbn float16_scalbn_tricore +#define float32_scalbn float32_scalbn_tricore +#define float64_scalbn float64_scalbn_tricore +#define float16_sqrt float16_sqrt_tricore +#define float32_sqrt float32_sqrt_tricore +#define float64_sqrt float64_sqrt_tricore +#define float16_default_nan float16_default_nan_tricore +#define float32_default_nan float32_default_nan_tricore +#define float64_default_nan float64_default_nan_tricore +#define float128_default_nan float128_default_nan_tricore +#define float16_silence_nan float16_silence_nan_tricore +#define float32_silence_nan float32_silence_nan_tricore +#define float64_silence_nan float64_silence_nan_tricore +#define float16_squash_input_denormal float16_squash_input_denormal_tricore +#define float32_squash_input_denormal float32_squash_input_denormal_tricore +#define float64_squash_input_denormal float64_squash_input_denormal_tricore +#define normalizeFloatx80Subnormal normalizeFloatx80Subnormal_tricore +#define roundAndPackFloatx80 roundAndPackFloatx80_tricore +#define normalizeRoundAndPackFloatx80 normalizeRoundAndPackFloatx80_tricore +#define int32_to_floatx80 int32_to_floatx80_tricore +#define int32_to_float128 int32_to_float128_tricore +#define int64_to_floatx80 int64_to_floatx80_tricore +#define int64_to_float128 int64_to_float128_tricore +#define uint64_to_float128 uint64_to_float128_tricore +#define 
float32_to_floatx80 float32_to_floatx80_tricore +#define float32_to_float128 float32_to_float128_tricore +#define float32_rem float32_rem_tricore +#define float32_exp2 float32_exp2_tricore +#define float32_log2 float32_log2_tricore +#define float32_eq float32_eq_tricore +#define float32_le float32_le_tricore +#define float32_lt float32_lt_tricore +#define float32_unordered float32_unordered_tricore +#define float32_eq_quiet float32_eq_quiet_tricore +#define float32_le_quiet float32_le_quiet_tricore +#define float32_lt_quiet float32_lt_quiet_tricore +#define float32_unordered_quiet float32_unordered_quiet_tricore +#define float64_to_floatx80 float64_to_floatx80_tricore +#define float64_to_float128 float64_to_float128_tricore +#define float64_rem float64_rem_tricore +#define float64_log2 float64_log2_tricore +#define float64_eq float64_eq_tricore +#define float64_le float64_le_tricore +#define float64_lt float64_lt_tricore +#define float64_unordered float64_unordered_tricore +#define float64_eq_quiet float64_eq_quiet_tricore +#define float64_le_quiet float64_le_quiet_tricore +#define float64_lt_quiet float64_lt_quiet_tricore +#define float64_unordered_quiet float64_unordered_quiet_tricore +#define floatx80_to_int32 floatx80_to_int32_tricore +#define floatx80_to_int32_round_to_zero floatx80_to_int32_round_to_zero_tricore +#define floatx80_to_int64 floatx80_to_int64_tricore +#define floatx80_to_int64_round_to_zero floatx80_to_int64_round_to_zero_tricore +#define floatx80_to_float32 floatx80_to_float32_tricore +#define floatx80_to_float64 floatx80_to_float64_tricore +#define floatx80_to_float128 floatx80_to_float128_tricore +#define floatx80_round floatx80_round_tricore +#define floatx80_round_to_int floatx80_round_to_int_tricore +#define floatx80_add floatx80_add_tricore +#define floatx80_sub floatx80_sub_tricore +#define floatx80_mul floatx80_mul_tricore +#define floatx80_div floatx80_div_tricore +#define floatx80_rem floatx80_rem_tricore +#define floatx80_sqrt 
floatx80_sqrt_tricore +#define floatx80_eq floatx80_eq_tricore +#define floatx80_le floatx80_le_tricore +#define floatx80_lt floatx80_lt_tricore +#define floatx80_unordered floatx80_unordered_tricore +#define floatx80_eq_quiet floatx80_eq_quiet_tricore +#define floatx80_le_quiet floatx80_le_quiet_tricore +#define floatx80_lt_quiet floatx80_lt_quiet_tricore +#define floatx80_unordered_quiet floatx80_unordered_quiet_tricore +#define float128_to_int32 float128_to_int32_tricore +#define float128_to_int32_round_to_zero float128_to_int32_round_to_zero_tricore +#define float128_to_int64 float128_to_int64_tricore +#define float128_to_int64_round_to_zero float128_to_int64_round_to_zero_tricore +#define float128_to_uint64 float128_to_uint64_tricore +#define float128_to_uint64_round_to_zero float128_to_uint64_round_to_zero_tricore +#define float128_to_uint32_round_to_zero float128_to_uint32_round_to_zero_tricore +#define float128_to_uint32 float128_to_uint32_tricore +#define float128_to_float32 float128_to_float32_tricore +#define float128_to_float64 float128_to_float64_tricore +#define float128_to_floatx80 float128_to_floatx80_tricore +#define float128_round_to_int float128_round_to_int_tricore +#define float128_add float128_add_tricore +#define float128_sub float128_sub_tricore +#define float128_mul float128_mul_tricore +#define float128_div float128_div_tricore +#define float128_rem float128_rem_tricore +#define float128_sqrt float128_sqrt_tricore +#define float128_eq float128_eq_tricore +#define float128_le float128_le_tricore +#define float128_lt float128_lt_tricore +#define float128_unordered float128_unordered_tricore +#define float128_eq_quiet float128_eq_quiet_tricore +#define float128_le_quiet float128_le_quiet_tricore +#define float128_lt_quiet float128_lt_quiet_tricore +#define float128_unordered_quiet float128_unordered_quiet_tricore +#define floatx80_compare floatx80_compare_tricore +#define floatx80_compare_quiet floatx80_compare_quiet_tricore +#define 
float128_compare float128_compare_tricore +#define float128_compare_quiet float128_compare_quiet_tricore +#define floatx80_scalbn floatx80_scalbn_tricore +#define float128_scalbn float128_scalbn_tricore +#define softfloat_init softfloat_init_tricore +#define tcg_optimize tcg_optimize_tricore +#define gen_new_label gen_new_label_tricore +#define tcg_can_emit_vec_op tcg_can_emit_vec_op_tricore +#define tcg_expand_vec_op tcg_expand_vec_op_tricore +#define tcg_register_jit tcg_register_jit_tricore +#define tcg_tb_insert tcg_tb_insert_tricore +#define tcg_tb_remove tcg_tb_remove_tricore +#define tcg_tb_lookup tcg_tb_lookup_tricore +#define tcg_tb_foreach tcg_tb_foreach_tricore +#define tcg_nb_tbs tcg_nb_tbs_tricore +#define tcg_region_reset_all tcg_region_reset_all_tricore +#define tcg_region_init tcg_region_init_tricore +#define tcg_code_size tcg_code_size_tricore +#define tcg_code_capacity tcg_code_capacity_tricore +#define tcg_tb_phys_invalidate_count tcg_tb_phys_invalidate_count_tricore +#define tcg_malloc_internal tcg_malloc_internal_tricore +#define tcg_pool_reset tcg_pool_reset_tricore +#define tcg_context_init tcg_context_init_tricore +#define tcg_tb_alloc tcg_tb_alloc_tricore +#define tcg_prologue_init tcg_prologue_init_tricore +#define tcg_func_start tcg_func_start_tricore +#define tcg_set_frame tcg_set_frame_tricore +#define tcg_global_mem_new_internal tcg_global_mem_new_internal_tricore +#define tcg_temp_new_internal tcg_temp_new_internal_tricore +#define tcg_temp_new_vec tcg_temp_new_vec_tricore +#define tcg_temp_new_vec_matching tcg_temp_new_vec_matching_tricore +#define tcg_temp_free_internal tcg_temp_free_internal_tricore +#define tcg_const_i32 tcg_const_i32_tricore +#define tcg_const_i64 tcg_const_i64_tricore +#define tcg_const_local_i32 tcg_const_local_i32_tricore +#define tcg_const_local_i64 tcg_const_local_i64_tricore +#define tcg_op_supported tcg_op_supported_tricore +#define tcg_gen_callN tcg_gen_callN_tricore +#define tcg_op_remove 
tcg_op_remove_tricore +#define tcg_emit_op tcg_emit_op_tricore +#define tcg_op_insert_before tcg_op_insert_before_tricore +#define tcg_op_insert_after tcg_op_insert_after_tricore +#define tcg_cpu_exec_time tcg_cpu_exec_time_tricore +#define tcg_gen_code tcg_gen_code_tricore +#define tcg_gen_op1 tcg_gen_op1_tricore +#define tcg_gen_op2 tcg_gen_op2_tricore +#define tcg_gen_op3 tcg_gen_op3_tricore +#define tcg_gen_op4 tcg_gen_op4_tricore +#define tcg_gen_op5 tcg_gen_op5_tricore +#define tcg_gen_op6 tcg_gen_op6_tricore +#define tcg_gen_mb tcg_gen_mb_tricore +#define tcg_gen_addi_i32 tcg_gen_addi_i32_tricore +#define tcg_gen_subfi_i32 tcg_gen_subfi_i32_tricore +#define tcg_gen_subi_i32 tcg_gen_subi_i32_tricore +#define tcg_gen_andi_i32 tcg_gen_andi_i32_tricore +#define tcg_gen_ori_i32 tcg_gen_ori_i32_tricore +#define tcg_gen_xori_i32 tcg_gen_xori_i32_tricore +#define tcg_gen_shli_i32 tcg_gen_shli_i32_tricore +#define tcg_gen_shri_i32 tcg_gen_shri_i32_tricore +#define tcg_gen_sari_i32 tcg_gen_sari_i32_tricore +#define tcg_gen_brcond_i32 tcg_gen_brcond_i32_tricore +#define tcg_gen_brcondi_i32 tcg_gen_brcondi_i32_tricore +#define tcg_gen_setcond_i32 tcg_gen_setcond_i32_tricore +#define tcg_gen_setcondi_i32 tcg_gen_setcondi_i32_tricore +#define tcg_gen_muli_i32 tcg_gen_muli_i32_tricore +#define tcg_gen_div_i32 tcg_gen_div_i32_tricore +#define tcg_gen_rem_i32 tcg_gen_rem_i32_tricore +#define tcg_gen_divu_i32 tcg_gen_divu_i32_tricore +#define tcg_gen_remu_i32 tcg_gen_remu_i32_tricore +#define tcg_gen_andc_i32 tcg_gen_andc_i32_tricore +#define tcg_gen_eqv_i32 tcg_gen_eqv_i32_tricore +#define tcg_gen_nand_i32 tcg_gen_nand_i32_tricore +#define tcg_gen_nor_i32 tcg_gen_nor_i32_tricore +#define tcg_gen_orc_i32 tcg_gen_orc_i32_tricore +#define tcg_gen_clz_i32 tcg_gen_clz_i32_tricore +#define tcg_gen_clzi_i32 tcg_gen_clzi_i32_tricore +#define tcg_gen_ctz_i32 tcg_gen_ctz_i32_tricore +#define tcg_gen_ctzi_i32 tcg_gen_ctzi_i32_tricore +#define tcg_gen_clrsb_i32 tcg_gen_clrsb_i32_tricore 
+#define tcg_gen_ctpop_i32 tcg_gen_ctpop_i32_tricore +#define tcg_gen_rotl_i32 tcg_gen_rotl_i32_tricore +#define tcg_gen_rotli_i32 tcg_gen_rotli_i32_tricore +#define tcg_gen_rotr_i32 tcg_gen_rotr_i32_tricore +#define tcg_gen_rotri_i32 tcg_gen_rotri_i32_tricore +#define tcg_gen_deposit_i32 tcg_gen_deposit_i32_tricore +#define tcg_gen_deposit_z_i32 tcg_gen_deposit_z_i32_tricore +#define tcg_gen_extract_i32 tcg_gen_extract_i32_tricore +#define tcg_gen_sextract_i32 tcg_gen_sextract_i32_tricore +#define tcg_gen_extract2_i32 tcg_gen_extract2_i32_tricore +#define tcg_gen_movcond_i32 tcg_gen_movcond_i32_tricore +#define tcg_gen_add2_i32 tcg_gen_add2_i32_tricore +#define tcg_gen_sub2_i32 tcg_gen_sub2_i32_tricore +#define tcg_gen_mulu2_i32 tcg_gen_mulu2_i32_tricore +#define tcg_gen_muls2_i32 tcg_gen_muls2_i32_tricore +#define tcg_gen_mulsu2_i32 tcg_gen_mulsu2_i32_tricore +#define tcg_gen_ext8s_i32 tcg_gen_ext8s_i32_tricore +#define tcg_gen_ext16s_i32 tcg_gen_ext16s_i32_tricore +#define tcg_gen_ext8u_i32 tcg_gen_ext8u_i32_tricore +#define tcg_gen_ext16u_i32 tcg_gen_ext16u_i32_tricore +#define tcg_gen_bswap16_i32 tcg_gen_bswap16_i32_tricore +#define tcg_gen_bswap32_i32 tcg_gen_bswap32_i32_tricore +#define tcg_gen_smin_i32 tcg_gen_smin_i32_tricore +#define tcg_gen_umin_i32 tcg_gen_umin_i32_tricore +#define tcg_gen_smax_i32 tcg_gen_smax_i32_tricore +#define tcg_gen_umax_i32 tcg_gen_umax_i32_tricore +#define tcg_gen_abs_i32 tcg_gen_abs_i32_tricore +#define tcg_gen_addi_i64 tcg_gen_addi_i64_tricore +#define tcg_gen_subfi_i64 tcg_gen_subfi_i64_tricore +#define tcg_gen_subi_i64 tcg_gen_subi_i64_tricore +#define tcg_gen_andi_i64 tcg_gen_andi_i64_tricore +#define tcg_gen_ori_i64 tcg_gen_ori_i64_tricore +#define tcg_gen_xori_i64 tcg_gen_xori_i64_tricore +#define tcg_gen_shli_i64 tcg_gen_shli_i64_tricore +#define tcg_gen_shri_i64 tcg_gen_shri_i64_tricore +#define tcg_gen_sari_i64 tcg_gen_sari_i64_tricore +#define tcg_gen_brcond_i64 tcg_gen_brcond_i64_tricore +#define tcg_gen_brcondi_i64 
tcg_gen_brcondi_i64_tricore +#define tcg_gen_setcond_i64 tcg_gen_setcond_i64_tricore +#define tcg_gen_setcondi_i64 tcg_gen_setcondi_i64_tricore +#define tcg_gen_muli_i64 tcg_gen_muli_i64_tricore +#define tcg_gen_div_i64 tcg_gen_div_i64_tricore +#define tcg_gen_rem_i64 tcg_gen_rem_i64_tricore +#define tcg_gen_divu_i64 tcg_gen_divu_i64_tricore +#define tcg_gen_remu_i64 tcg_gen_remu_i64_tricore +#define tcg_gen_ext8s_i64 tcg_gen_ext8s_i64_tricore +#define tcg_gen_ext16s_i64 tcg_gen_ext16s_i64_tricore +#define tcg_gen_ext32s_i64 tcg_gen_ext32s_i64_tricore +#define tcg_gen_ext8u_i64 tcg_gen_ext8u_i64_tricore +#define tcg_gen_ext16u_i64 tcg_gen_ext16u_i64_tricore +#define tcg_gen_ext32u_i64 tcg_gen_ext32u_i64_tricore +#define tcg_gen_bswap16_i64 tcg_gen_bswap16_i64_tricore +#define tcg_gen_bswap32_i64 tcg_gen_bswap32_i64_tricore +#define tcg_gen_bswap64_i64 tcg_gen_bswap64_i64_tricore +#define tcg_gen_not_i64 tcg_gen_not_i64_tricore +#define tcg_gen_andc_i64 tcg_gen_andc_i64_tricore +#define tcg_gen_eqv_i64 tcg_gen_eqv_i64_tricore +#define tcg_gen_nand_i64 tcg_gen_nand_i64_tricore +#define tcg_gen_nor_i64 tcg_gen_nor_i64_tricore +#define tcg_gen_orc_i64 tcg_gen_orc_i64_tricore +#define tcg_gen_clz_i64 tcg_gen_clz_i64_tricore +#define tcg_gen_clzi_i64 tcg_gen_clzi_i64_tricore +#define tcg_gen_ctz_i64 tcg_gen_ctz_i64_tricore +#define tcg_gen_ctzi_i64 tcg_gen_ctzi_i64_tricore +#define tcg_gen_clrsb_i64 tcg_gen_clrsb_i64_tricore +#define tcg_gen_ctpop_i64 tcg_gen_ctpop_i64_tricore +#define tcg_gen_rotl_i64 tcg_gen_rotl_i64_tricore +#define tcg_gen_rotli_i64 tcg_gen_rotli_i64_tricore +#define tcg_gen_rotr_i64 tcg_gen_rotr_i64_tricore +#define tcg_gen_rotri_i64 tcg_gen_rotri_i64_tricore +#define tcg_gen_deposit_i64 tcg_gen_deposit_i64_tricore +#define tcg_gen_deposit_z_i64 tcg_gen_deposit_z_i64_tricore +#define tcg_gen_extract_i64 tcg_gen_extract_i64_tricore +#define tcg_gen_sextract_i64 tcg_gen_sextract_i64_tricore +#define tcg_gen_extract2_i64 tcg_gen_extract2_i64_tricore 
+#define tcg_gen_movcond_i64 tcg_gen_movcond_i64_tricore +#define tcg_gen_add2_i64 tcg_gen_add2_i64_tricore +#define tcg_gen_sub2_i64 tcg_gen_sub2_i64_tricore +#define tcg_gen_mulu2_i64 tcg_gen_mulu2_i64_tricore +#define tcg_gen_muls2_i64 tcg_gen_muls2_i64_tricore +#define tcg_gen_mulsu2_i64 tcg_gen_mulsu2_i64_tricore +#define tcg_gen_smin_i64 tcg_gen_smin_i64_tricore +#define tcg_gen_umin_i64 tcg_gen_umin_i64_tricore +#define tcg_gen_smax_i64 tcg_gen_smax_i64_tricore +#define tcg_gen_umax_i64 tcg_gen_umax_i64_tricore +#define tcg_gen_abs_i64 tcg_gen_abs_i64_tricore +#define tcg_gen_extrl_i64_i32 tcg_gen_extrl_i64_i32_tricore +#define tcg_gen_extrh_i64_i32 tcg_gen_extrh_i64_i32_tricore +#define tcg_gen_extu_i32_i64 tcg_gen_extu_i32_i64_tricore +#define tcg_gen_ext_i32_i64 tcg_gen_ext_i32_i64_tricore +#define tcg_gen_concat_i32_i64 tcg_gen_concat_i32_i64_tricore +#define tcg_gen_extr_i64_i32 tcg_gen_extr_i64_i32_tricore +#define tcg_gen_extr32_i64 tcg_gen_extr32_i64_tricore +#define tcg_gen_exit_tb tcg_gen_exit_tb_tricore +#define tcg_gen_goto_tb tcg_gen_goto_tb_tricore +#define tcg_gen_lookup_and_goto_ptr tcg_gen_lookup_and_goto_ptr_tricore +#define check_exit_request check_exit_request_tricore +#define tcg_gen_qemu_ld_i32 tcg_gen_qemu_ld_i32_tricore +#define tcg_gen_qemu_st_i32 tcg_gen_qemu_st_i32_tricore +#define tcg_gen_qemu_ld_i64 tcg_gen_qemu_ld_i64_tricore +#define tcg_gen_qemu_st_i64 tcg_gen_qemu_st_i64_tricore +#define tcg_gen_atomic_cmpxchg_i32 tcg_gen_atomic_cmpxchg_i32_tricore +#define tcg_gen_atomic_cmpxchg_i64 tcg_gen_atomic_cmpxchg_i64_tricore +#define tcg_gen_atomic_fetch_add_i32 tcg_gen_atomic_fetch_add_i32_tricore +#define tcg_gen_atomic_fetch_add_i64 tcg_gen_atomic_fetch_add_i64_tricore +#define tcg_gen_atomic_fetch_and_i32 tcg_gen_atomic_fetch_and_i32_tricore +#define tcg_gen_atomic_fetch_and_i64 tcg_gen_atomic_fetch_and_i64_tricore +#define tcg_gen_atomic_fetch_or_i32 tcg_gen_atomic_fetch_or_i32_tricore +#define tcg_gen_atomic_fetch_or_i64 
tcg_gen_atomic_fetch_or_i64_tricore +#define tcg_gen_atomic_fetch_xor_i32 tcg_gen_atomic_fetch_xor_i32_tricore +#define tcg_gen_atomic_fetch_xor_i64 tcg_gen_atomic_fetch_xor_i64_tricore +#define tcg_gen_atomic_fetch_smin_i32 tcg_gen_atomic_fetch_smin_i32_tricore +#define tcg_gen_atomic_fetch_smin_i64 tcg_gen_atomic_fetch_smin_i64_tricore +#define tcg_gen_atomic_fetch_umin_i32 tcg_gen_atomic_fetch_umin_i32_tricore +#define tcg_gen_atomic_fetch_umin_i64 tcg_gen_atomic_fetch_umin_i64_tricore +#define tcg_gen_atomic_fetch_smax_i32 tcg_gen_atomic_fetch_smax_i32_tricore +#define tcg_gen_atomic_fetch_smax_i64 tcg_gen_atomic_fetch_smax_i64_tricore +#define tcg_gen_atomic_fetch_umax_i32 tcg_gen_atomic_fetch_umax_i32_tricore +#define tcg_gen_atomic_fetch_umax_i64 tcg_gen_atomic_fetch_umax_i64_tricore +#define tcg_gen_atomic_add_fetch_i32 tcg_gen_atomic_add_fetch_i32_tricore +#define tcg_gen_atomic_add_fetch_i64 tcg_gen_atomic_add_fetch_i64_tricore +#define tcg_gen_atomic_and_fetch_i32 tcg_gen_atomic_and_fetch_i32_tricore +#define tcg_gen_atomic_and_fetch_i64 tcg_gen_atomic_and_fetch_i64_tricore +#define tcg_gen_atomic_or_fetch_i32 tcg_gen_atomic_or_fetch_i32_tricore +#define tcg_gen_atomic_or_fetch_i64 tcg_gen_atomic_or_fetch_i64_tricore +#define tcg_gen_atomic_xor_fetch_i32 tcg_gen_atomic_xor_fetch_i32_tricore +#define tcg_gen_atomic_xor_fetch_i64 tcg_gen_atomic_xor_fetch_i64_tricore +#define tcg_gen_atomic_smin_fetch_i32 tcg_gen_atomic_smin_fetch_i32_tricore +#define tcg_gen_atomic_smin_fetch_i64 tcg_gen_atomic_smin_fetch_i64_tricore +#define tcg_gen_atomic_umin_fetch_i32 tcg_gen_atomic_umin_fetch_i32_tricore +#define tcg_gen_atomic_umin_fetch_i64 tcg_gen_atomic_umin_fetch_i64_tricore +#define tcg_gen_atomic_smax_fetch_i32 tcg_gen_atomic_smax_fetch_i32_tricore +#define tcg_gen_atomic_smax_fetch_i64 tcg_gen_atomic_smax_fetch_i64_tricore +#define tcg_gen_atomic_umax_fetch_i32 tcg_gen_atomic_umax_fetch_i32_tricore +#define tcg_gen_atomic_umax_fetch_i64 
tcg_gen_atomic_umax_fetch_i64_tricore +#define tcg_gen_atomic_xchg_i32 tcg_gen_atomic_xchg_i32_tricore +#define tcg_gen_atomic_xchg_i64 tcg_gen_atomic_xchg_i64_tricore +#define simd_desc simd_desc_tricore +#define tcg_gen_gvec_2_ool tcg_gen_gvec_2_ool_tricore +#define tcg_gen_gvec_2i_ool tcg_gen_gvec_2i_ool_tricore +#define tcg_gen_gvec_3_ool tcg_gen_gvec_3_ool_tricore +#define tcg_gen_gvec_4_ool tcg_gen_gvec_4_ool_tricore +#define tcg_gen_gvec_5_ool tcg_gen_gvec_5_ool_tricore +#define tcg_gen_gvec_2_ptr tcg_gen_gvec_2_ptr_tricore +#define tcg_gen_gvec_3_ptr tcg_gen_gvec_3_ptr_tricore +#define tcg_gen_gvec_4_ptr tcg_gen_gvec_4_ptr_tricore +#define tcg_gen_gvec_5_ptr tcg_gen_gvec_5_ptr_tricore +#define tcg_gen_gvec_2 tcg_gen_gvec_2_tricore +#define tcg_gen_gvec_2i tcg_gen_gvec_2i_tricore +#define tcg_gen_gvec_2s tcg_gen_gvec_2s_tricore +#define tcg_gen_gvec_3 tcg_gen_gvec_3_tricore +#define tcg_gen_gvec_3i tcg_gen_gvec_3i_tricore +#define tcg_gen_gvec_4 tcg_gen_gvec_4_tricore +#define tcg_gen_gvec_mov tcg_gen_gvec_mov_tricore +#define tcg_gen_gvec_dup_i32 tcg_gen_gvec_dup_i32_tricore +#define tcg_gen_gvec_dup_i64 tcg_gen_gvec_dup_i64_tricore +#define tcg_gen_gvec_dup_mem tcg_gen_gvec_dup_mem_tricore +#define tcg_gen_gvec_dup64i tcg_gen_gvec_dup64i_tricore +#define tcg_gen_gvec_dup32i tcg_gen_gvec_dup32i_tricore +#define tcg_gen_gvec_dup16i tcg_gen_gvec_dup16i_tricore +#define tcg_gen_gvec_dup8i tcg_gen_gvec_dup8i_tricore +#define tcg_gen_gvec_not tcg_gen_gvec_not_tricore +#define tcg_gen_vec_add8_i64 tcg_gen_vec_add8_i64_tricore +#define tcg_gen_vec_add16_i64 tcg_gen_vec_add16_i64_tricore +#define tcg_gen_vec_add32_i64 tcg_gen_vec_add32_i64_tricore +#define tcg_gen_gvec_add tcg_gen_gvec_add_tricore +#define tcg_gen_gvec_adds tcg_gen_gvec_adds_tricore +#define tcg_gen_gvec_addi tcg_gen_gvec_addi_tricore +#define tcg_gen_gvec_subs tcg_gen_gvec_subs_tricore +#define tcg_gen_vec_sub8_i64 tcg_gen_vec_sub8_i64_tricore +#define tcg_gen_vec_sub16_i64 
tcg_gen_vec_sub16_i64_tricore +#define tcg_gen_vec_sub32_i64 tcg_gen_vec_sub32_i64_tricore +#define tcg_gen_gvec_sub tcg_gen_gvec_sub_tricore +#define tcg_gen_gvec_mul tcg_gen_gvec_mul_tricore +#define tcg_gen_gvec_muls tcg_gen_gvec_muls_tricore +#define tcg_gen_gvec_muli tcg_gen_gvec_muli_tricore +#define tcg_gen_gvec_ssadd tcg_gen_gvec_ssadd_tricore +#define tcg_gen_gvec_sssub tcg_gen_gvec_sssub_tricore +#define tcg_gen_gvec_usadd tcg_gen_gvec_usadd_tricore +#define tcg_gen_gvec_ussub tcg_gen_gvec_ussub_tricore +#define tcg_gen_gvec_smin tcg_gen_gvec_smin_tricore +#define tcg_gen_gvec_umin tcg_gen_gvec_umin_tricore +#define tcg_gen_gvec_smax tcg_gen_gvec_smax_tricore +#define tcg_gen_gvec_umax tcg_gen_gvec_umax_tricore +#define tcg_gen_vec_neg8_i64 tcg_gen_vec_neg8_i64_tricore +#define tcg_gen_vec_neg16_i64 tcg_gen_vec_neg16_i64_tricore +#define tcg_gen_vec_neg32_i64 tcg_gen_vec_neg32_i64_tricore +#define tcg_gen_gvec_neg tcg_gen_gvec_neg_tricore +#define tcg_gen_gvec_abs tcg_gen_gvec_abs_tricore +#define tcg_gen_gvec_and tcg_gen_gvec_and_tricore +#define tcg_gen_gvec_or tcg_gen_gvec_or_tricore +#define tcg_gen_gvec_xor tcg_gen_gvec_xor_tricore +#define tcg_gen_gvec_andc tcg_gen_gvec_andc_tricore +#define tcg_gen_gvec_orc tcg_gen_gvec_orc_tricore +#define tcg_gen_gvec_nand tcg_gen_gvec_nand_tricore +#define tcg_gen_gvec_nor tcg_gen_gvec_nor_tricore +#define tcg_gen_gvec_eqv tcg_gen_gvec_eqv_tricore +#define tcg_gen_gvec_ands tcg_gen_gvec_ands_tricore +#define tcg_gen_gvec_andi tcg_gen_gvec_andi_tricore +#define tcg_gen_gvec_xors tcg_gen_gvec_xors_tricore +#define tcg_gen_gvec_xori tcg_gen_gvec_xori_tricore +#define tcg_gen_gvec_ors tcg_gen_gvec_ors_tricore +#define tcg_gen_gvec_ori tcg_gen_gvec_ori_tricore +#define tcg_gen_vec_shl8i_i64 tcg_gen_vec_shl8i_i64_tricore +#define tcg_gen_vec_shl16i_i64 tcg_gen_vec_shl16i_i64_tricore +#define tcg_gen_gvec_shli tcg_gen_gvec_shli_tricore +#define tcg_gen_vec_shr8i_i64 tcg_gen_vec_shr8i_i64_tricore +#define 
tcg_gen_vec_shr16i_i64 tcg_gen_vec_shr16i_i64_tricore +#define tcg_gen_gvec_shri tcg_gen_gvec_shri_tricore +#define tcg_gen_vec_sar8i_i64 tcg_gen_vec_sar8i_i64_tricore +#define tcg_gen_vec_sar16i_i64 tcg_gen_vec_sar16i_i64_tricore +#define tcg_gen_gvec_sari tcg_gen_gvec_sari_tricore +#define tcg_gen_gvec_shls tcg_gen_gvec_shls_tricore +#define tcg_gen_gvec_shrs tcg_gen_gvec_shrs_tricore +#define tcg_gen_gvec_sars tcg_gen_gvec_sars_tricore +#define tcg_gen_gvec_shlv tcg_gen_gvec_shlv_tricore +#define tcg_gen_gvec_shrv tcg_gen_gvec_shrv_tricore +#define tcg_gen_gvec_sarv tcg_gen_gvec_sarv_tricore +#define tcg_gen_gvec_cmp tcg_gen_gvec_cmp_tricore +#define tcg_gen_gvec_bitsel tcg_gen_gvec_bitsel_tricore +#define tcg_can_emit_vecop_list tcg_can_emit_vecop_list_tricore +#define vec_gen_2 vec_gen_2_tricore +#define vec_gen_3 vec_gen_3_tricore +#define vec_gen_4 vec_gen_4_tricore +#define tcg_gen_mov_vec tcg_gen_mov_vec_tricore +#define tcg_const_zeros_vec tcg_const_zeros_vec_tricore +#define tcg_const_ones_vec tcg_const_ones_vec_tricore +#define tcg_const_zeros_vec_matching tcg_const_zeros_vec_matching_tricore +#define tcg_const_ones_vec_matching tcg_const_ones_vec_matching_tricore +#define tcg_gen_dup64i_vec tcg_gen_dup64i_vec_tricore +#define tcg_gen_dup32i_vec tcg_gen_dup32i_vec_tricore +#define tcg_gen_dup16i_vec tcg_gen_dup16i_vec_tricore +#define tcg_gen_dup8i_vec tcg_gen_dup8i_vec_tricore +#define tcg_gen_dupi_vec tcg_gen_dupi_vec_tricore +#define tcg_gen_dup_i64_vec tcg_gen_dup_i64_vec_tricore +#define tcg_gen_dup_i32_vec tcg_gen_dup_i32_vec_tricore +#define tcg_gen_dup_mem_vec tcg_gen_dup_mem_vec_tricore +#define tcg_gen_ld_vec tcg_gen_ld_vec_tricore +#define tcg_gen_st_vec tcg_gen_st_vec_tricore +#define tcg_gen_stl_vec tcg_gen_stl_vec_tricore +#define tcg_gen_and_vec tcg_gen_and_vec_tricore +#define tcg_gen_or_vec tcg_gen_or_vec_tricore +#define tcg_gen_xor_vec tcg_gen_xor_vec_tricore +#define tcg_gen_andc_vec tcg_gen_andc_vec_tricore +#define tcg_gen_orc_vec 
tcg_gen_orc_vec_tricore +#define tcg_gen_nand_vec tcg_gen_nand_vec_tricore +#define tcg_gen_nor_vec tcg_gen_nor_vec_tricore +#define tcg_gen_eqv_vec tcg_gen_eqv_vec_tricore +#define tcg_gen_not_vec tcg_gen_not_vec_tricore +#define tcg_gen_neg_vec tcg_gen_neg_vec_tricore +#define tcg_gen_abs_vec tcg_gen_abs_vec_tricore +#define tcg_gen_shli_vec tcg_gen_shli_vec_tricore +#define tcg_gen_shri_vec tcg_gen_shri_vec_tricore +#define tcg_gen_sari_vec tcg_gen_sari_vec_tricore +#define tcg_gen_cmp_vec tcg_gen_cmp_vec_tricore +#define tcg_gen_add_vec tcg_gen_add_vec_tricore +#define tcg_gen_sub_vec tcg_gen_sub_vec_tricore +#define tcg_gen_mul_vec tcg_gen_mul_vec_tricore +#define tcg_gen_ssadd_vec tcg_gen_ssadd_vec_tricore +#define tcg_gen_usadd_vec tcg_gen_usadd_vec_tricore +#define tcg_gen_sssub_vec tcg_gen_sssub_vec_tricore +#define tcg_gen_ussub_vec tcg_gen_ussub_vec_tricore +#define tcg_gen_smin_vec tcg_gen_smin_vec_tricore +#define tcg_gen_umin_vec tcg_gen_umin_vec_tricore +#define tcg_gen_smax_vec tcg_gen_smax_vec_tricore +#define tcg_gen_umax_vec tcg_gen_umax_vec_tricore +#define tcg_gen_shlv_vec tcg_gen_shlv_vec_tricore +#define tcg_gen_shrv_vec tcg_gen_shrv_vec_tricore +#define tcg_gen_sarv_vec tcg_gen_sarv_vec_tricore +#define tcg_gen_shls_vec tcg_gen_shls_vec_tricore +#define tcg_gen_shrs_vec tcg_gen_shrs_vec_tricore +#define tcg_gen_sars_vec tcg_gen_sars_vec_tricore +#define tcg_gen_bitsel_vec tcg_gen_bitsel_vec_tricore +#define tcg_gen_cmpsel_vec tcg_gen_cmpsel_vec_tricore +#define tb_htable_lookup tb_htable_lookup_tricore +#define tb_set_jmp_target tb_set_jmp_target_tricore +#define cpu_exec cpu_exec_tricore +#define cpu_loop_exit_noexc cpu_loop_exit_noexc_tricore +#define cpu_reloading_memory_map cpu_reloading_memory_map_tricore +#define cpu_loop_exit cpu_loop_exit_tricore +#define cpu_loop_exit_restore cpu_loop_exit_restore_tricore +#define cpu_loop_exit_atomic cpu_loop_exit_atomic_tricore +#define tlb_init tlb_init_tricore +#define tlb_flush_by_mmuidx 
tlb_flush_by_mmuidx_tricore +#define tlb_flush tlb_flush_tricore +#define tlb_flush_by_mmuidx_all_cpus tlb_flush_by_mmuidx_all_cpus_tricore +#define tlb_flush_all_cpus tlb_flush_all_cpus_tricore +#define tlb_flush_by_mmuidx_all_cpus_synced tlb_flush_by_mmuidx_all_cpus_synced_tricore +#define tlb_flush_all_cpus_synced tlb_flush_all_cpus_synced_tricore +#define tlb_flush_page_by_mmuidx tlb_flush_page_by_mmuidx_tricore +#define tlb_flush_page tlb_flush_page_tricore +#define tlb_flush_page_by_mmuidx_all_cpus tlb_flush_page_by_mmuidx_all_cpus_tricore +#define tlb_flush_page_all_cpus tlb_flush_page_all_cpus_tricore +#define tlb_flush_page_by_mmuidx_all_cpus_synced tlb_flush_page_by_mmuidx_all_cpus_synced_tricore +#define tlb_flush_page_all_cpus_synced tlb_flush_page_all_cpus_synced_tricore +#define tlb_protect_code tlb_protect_code_tricore +#define tlb_unprotect_code tlb_unprotect_code_tricore +#define tlb_reset_dirty tlb_reset_dirty_tricore +#define tlb_set_dirty tlb_set_dirty_tricore +#define tlb_set_page_with_attrs tlb_set_page_with_attrs_tricore +#define tlb_set_page tlb_set_page_tricore +#define get_page_addr_code_hostp get_page_addr_code_hostp_tricore +#define get_page_addr_code get_page_addr_code_tricore +#define probe_access probe_access_tricore +#define tlb_vaddr_to_host tlb_vaddr_to_host_tricore +#define helper_ret_ldub_mmu helper_ret_ldub_mmu_tricore +#define helper_le_lduw_mmu helper_le_lduw_mmu_tricore +#define helper_be_lduw_mmu helper_be_lduw_mmu_tricore +#define helper_le_ldul_mmu helper_le_ldul_mmu_tricore +#define helper_be_ldul_mmu helper_be_ldul_mmu_tricore +#define helper_le_ldq_mmu helper_le_ldq_mmu_tricore +#define helper_be_ldq_mmu helper_be_ldq_mmu_tricore +#define helper_ret_ldsb_mmu helper_ret_ldsb_mmu_tricore +#define helper_le_ldsw_mmu helper_le_ldsw_mmu_tricore +#define helper_be_ldsw_mmu helper_be_ldsw_mmu_tricore +#define helper_le_ldsl_mmu helper_le_ldsl_mmu_tricore +#define helper_be_ldsl_mmu helper_be_ldsl_mmu_tricore +#define 
cpu_ldub_mmuidx_ra cpu_ldub_mmuidx_ra_tricore +#define cpu_ldsb_mmuidx_ra cpu_ldsb_mmuidx_ra_tricore +#define cpu_lduw_mmuidx_ra cpu_lduw_mmuidx_ra_tricore +#define cpu_ldsw_mmuidx_ra cpu_ldsw_mmuidx_ra_tricore +#define cpu_ldl_mmuidx_ra cpu_ldl_mmuidx_ra_tricore +#define cpu_ldq_mmuidx_ra cpu_ldq_mmuidx_ra_tricore +#define cpu_ldub_data_ra cpu_ldub_data_ra_tricore +#define cpu_ldsb_data_ra cpu_ldsb_data_ra_tricore +#define cpu_lduw_data_ra cpu_lduw_data_ra_tricore +#define cpu_ldsw_data_ra cpu_ldsw_data_ra_tricore +#define cpu_ldl_data_ra cpu_ldl_data_ra_tricore +#define cpu_ldq_data_ra cpu_ldq_data_ra_tricore +#define cpu_ldub_data cpu_ldub_data_tricore +#define cpu_ldsb_data cpu_ldsb_data_tricore +#define cpu_lduw_data cpu_lduw_data_tricore +#define cpu_ldsw_data cpu_ldsw_data_tricore +#define cpu_ldl_data cpu_ldl_data_tricore +#define cpu_ldq_data cpu_ldq_data_tricore +#define helper_ret_stb_mmu helper_ret_stb_mmu_tricore +#define helper_le_stw_mmu helper_le_stw_mmu_tricore +#define helper_be_stw_mmu helper_be_stw_mmu_tricore +#define helper_le_stl_mmu helper_le_stl_mmu_tricore +#define helper_be_stl_mmu helper_be_stl_mmu_tricore +#define helper_le_stq_mmu helper_le_stq_mmu_tricore +#define helper_be_stq_mmu helper_be_stq_mmu_tricore +#define cpu_stb_mmuidx_ra cpu_stb_mmuidx_ra_tricore +#define cpu_stw_mmuidx_ra cpu_stw_mmuidx_ra_tricore +#define cpu_stl_mmuidx_ra cpu_stl_mmuidx_ra_tricore +#define cpu_stq_mmuidx_ra cpu_stq_mmuidx_ra_tricore +#define cpu_stb_data_ra cpu_stb_data_ra_tricore +#define cpu_stw_data_ra cpu_stw_data_ra_tricore +#define cpu_stl_data_ra cpu_stl_data_ra_tricore +#define cpu_stq_data_ra cpu_stq_data_ra_tricore +#define cpu_stb_data cpu_stb_data_tricore +#define cpu_stw_data cpu_stw_data_tricore +#define cpu_stl_data cpu_stl_data_tricore +#define cpu_stq_data cpu_stq_data_tricore +#define helper_atomic_cmpxchgb_mmu helper_atomic_cmpxchgb_mmu_tricore +#define helper_atomic_xchgb_mmu helper_atomic_xchgb_mmu_tricore +#define 
helper_atomic_fetch_addb_mmu helper_atomic_fetch_addb_mmu_tricore +#define helper_atomic_fetch_andb_mmu helper_atomic_fetch_andb_mmu_tricore +#define helper_atomic_fetch_orb_mmu helper_atomic_fetch_orb_mmu_tricore +#define helper_atomic_fetch_xorb_mmu helper_atomic_fetch_xorb_mmu_tricore +#define helper_atomic_add_fetchb_mmu helper_atomic_add_fetchb_mmu_tricore +#define helper_atomic_and_fetchb_mmu helper_atomic_and_fetchb_mmu_tricore +#define helper_atomic_or_fetchb_mmu helper_atomic_or_fetchb_mmu_tricore +#define helper_atomic_xor_fetchb_mmu helper_atomic_xor_fetchb_mmu_tricore +#define helper_atomic_fetch_sminb_mmu helper_atomic_fetch_sminb_mmu_tricore +#define helper_atomic_fetch_uminb_mmu helper_atomic_fetch_uminb_mmu_tricore +#define helper_atomic_fetch_smaxb_mmu helper_atomic_fetch_smaxb_mmu_tricore +#define helper_atomic_fetch_umaxb_mmu helper_atomic_fetch_umaxb_mmu_tricore +#define helper_atomic_smin_fetchb_mmu helper_atomic_smin_fetchb_mmu_tricore +#define helper_atomic_umin_fetchb_mmu helper_atomic_umin_fetchb_mmu_tricore +#define helper_atomic_smax_fetchb_mmu helper_atomic_smax_fetchb_mmu_tricore +#define helper_atomic_umax_fetchb_mmu helper_atomic_umax_fetchb_mmu_tricore +#define helper_atomic_cmpxchgw_le_mmu helper_atomic_cmpxchgw_le_mmu_tricore +#define helper_atomic_xchgw_le_mmu helper_atomic_xchgw_le_mmu_tricore +#define helper_atomic_fetch_addw_le_mmu helper_atomic_fetch_addw_le_mmu_tricore +#define helper_atomic_fetch_andw_le_mmu helper_atomic_fetch_andw_le_mmu_tricore +#define helper_atomic_fetch_orw_le_mmu helper_atomic_fetch_orw_le_mmu_tricore +#define helper_atomic_fetch_xorw_le_mmu helper_atomic_fetch_xorw_le_mmu_tricore +#define helper_atomic_add_fetchw_le_mmu helper_atomic_add_fetchw_le_mmu_tricore +#define helper_atomic_and_fetchw_le_mmu helper_atomic_and_fetchw_le_mmu_tricore +#define helper_atomic_or_fetchw_le_mmu helper_atomic_or_fetchw_le_mmu_tricore +#define helper_atomic_xor_fetchw_le_mmu helper_atomic_xor_fetchw_le_mmu_tricore 
+#define helper_atomic_fetch_sminw_le_mmu helper_atomic_fetch_sminw_le_mmu_tricore +#define helper_atomic_fetch_uminw_le_mmu helper_atomic_fetch_uminw_le_mmu_tricore +#define helper_atomic_fetch_smaxw_le_mmu helper_atomic_fetch_smaxw_le_mmu_tricore +#define helper_atomic_fetch_umaxw_le_mmu helper_atomic_fetch_umaxw_le_mmu_tricore +#define helper_atomic_smin_fetchw_le_mmu helper_atomic_smin_fetchw_le_mmu_tricore +#define helper_atomic_umin_fetchw_le_mmu helper_atomic_umin_fetchw_le_mmu_tricore +#define helper_atomic_smax_fetchw_le_mmu helper_atomic_smax_fetchw_le_mmu_tricore +#define helper_atomic_umax_fetchw_le_mmu helper_atomic_umax_fetchw_le_mmu_tricore +#define helper_atomic_cmpxchgw_be_mmu helper_atomic_cmpxchgw_be_mmu_tricore +#define helper_atomic_xchgw_be_mmu helper_atomic_xchgw_be_mmu_tricore +#define helper_atomic_fetch_andw_be_mmu helper_atomic_fetch_andw_be_mmu_tricore +#define helper_atomic_fetch_orw_be_mmu helper_atomic_fetch_orw_be_mmu_tricore +#define helper_atomic_fetch_xorw_be_mmu helper_atomic_fetch_xorw_be_mmu_tricore +#define helper_atomic_and_fetchw_be_mmu helper_atomic_and_fetchw_be_mmu_tricore +#define helper_atomic_or_fetchw_be_mmu helper_atomic_or_fetchw_be_mmu_tricore +#define helper_atomic_xor_fetchw_be_mmu helper_atomic_xor_fetchw_be_mmu_tricore +#define helper_atomic_fetch_sminw_be_mmu helper_atomic_fetch_sminw_be_mmu_tricore +#define helper_atomic_fetch_uminw_be_mmu helper_atomic_fetch_uminw_be_mmu_tricore +#define helper_atomic_fetch_smaxw_be_mmu helper_atomic_fetch_smaxw_be_mmu_tricore +#define helper_atomic_fetch_umaxw_be_mmu helper_atomic_fetch_umaxw_be_mmu_tricore +#define helper_atomic_smin_fetchw_be_mmu helper_atomic_smin_fetchw_be_mmu_tricore +#define helper_atomic_umin_fetchw_be_mmu helper_atomic_umin_fetchw_be_mmu_tricore +#define helper_atomic_smax_fetchw_be_mmu helper_atomic_smax_fetchw_be_mmu_tricore +#define helper_atomic_umax_fetchw_be_mmu helper_atomic_umax_fetchw_be_mmu_tricore +#define helper_atomic_fetch_addw_be_mmu 
helper_atomic_fetch_addw_be_mmu_tricore +#define helper_atomic_add_fetchw_be_mmu helper_atomic_add_fetchw_be_mmu_tricore +#define helper_atomic_cmpxchgl_le_mmu helper_atomic_cmpxchgl_le_mmu_tricore +#define helper_atomic_xchgl_le_mmu helper_atomic_xchgl_le_mmu_tricore +#define helper_atomic_fetch_addl_le_mmu helper_atomic_fetch_addl_le_mmu_tricore +#define helper_atomic_fetch_andl_le_mmu helper_atomic_fetch_andl_le_mmu_tricore +#define helper_atomic_fetch_orl_le_mmu helper_atomic_fetch_orl_le_mmu_tricore +#define helper_atomic_fetch_xorl_le_mmu helper_atomic_fetch_xorl_le_mmu_tricore +#define helper_atomic_add_fetchl_le_mmu helper_atomic_add_fetchl_le_mmu_tricore +#define helper_atomic_and_fetchl_le_mmu helper_atomic_and_fetchl_le_mmu_tricore +#define helper_atomic_or_fetchl_le_mmu helper_atomic_or_fetchl_le_mmu_tricore +#define helper_atomic_xor_fetchl_le_mmu helper_atomic_xor_fetchl_le_mmu_tricore +#define helper_atomic_fetch_sminl_le_mmu helper_atomic_fetch_sminl_le_mmu_tricore +#define helper_atomic_fetch_uminl_le_mmu helper_atomic_fetch_uminl_le_mmu_tricore +#define helper_atomic_fetch_smaxl_le_mmu helper_atomic_fetch_smaxl_le_mmu_tricore +#define helper_atomic_fetch_umaxl_le_mmu helper_atomic_fetch_umaxl_le_mmu_tricore +#define helper_atomic_smin_fetchl_le_mmu helper_atomic_smin_fetchl_le_mmu_tricore +#define helper_atomic_umin_fetchl_le_mmu helper_atomic_umin_fetchl_le_mmu_tricore +#define helper_atomic_smax_fetchl_le_mmu helper_atomic_smax_fetchl_le_mmu_tricore +#define helper_atomic_umax_fetchl_le_mmu helper_atomic_umax_fetchl_le_mmu_tricore +#define helper_atomic_cmpxchgl_be_mmu helper_atomic_cmpxchgl_be_mmu_tricore +#define helper_atomic_xchgl_be_mmu helper_atomic_xchgl_be_mmu_tricore +#define helper_atomic_fetch_andl_be_mmu helper_atomic_fetch_andl_be_mmu_tricore +#define helper_atomic_fetch_orl_be_mmu helper_atomic_fetch_orl_be_mmu_tricore +#define helper_atomic_fetch_xorl_be_mmu helper_atomic_fetch_xorl_be_mmu_tricore +#define 
helper_atomic_and_fetchl_be_mmu helper_atomic_and_fetchl_be_mmu_tricore +#define helper_atomic_or_fetchl_be_mmu helper_atomic_or_fetchl_be_mmu_tricore +#define helper_atomic_xor_fetchl_be_mmu helper_atomic_xor_fetchl_be_mmu_tricore +#define helper_atomic_fetch_sminl_be_mmu helper_atomic_fetch_sminl_be_mmu_tricore +#define helper_atomic_fetch_uminl_be_mmu helper_atomic_fetch_uminl_be_mmu_tricore +#define helper_atomic_fetch_smaxl_be_mmu helper_atomic_fetch_smaxl_be_mmu_tricore +#define helper_atomic_fetch_umaxl_be_mmu helper_atomic_fetch_umaxl_be_mmu_tricore +#define helper_atomic_smin_fetchl_be_mmu helper_atomic_smin_fetchl_be_mmu_tricore +#define helper_atomic_umin_fetchl_be_mmu helper_atomic_umin_fetchl_be_mmu_tricore +#define helper_atomic_smax_fetchl_be_mmu helper_atomic_smax_fetchl_be_mmu_tricore +#define helper_atomic_umax_fetchl_be_mmu helper_atomic_umax_fetchl_be_mmu_tricore +#define helper_atomic_fetch_addl_be_mmu helper_atomic_fetch_addl_be_mmu_tricore +#define helper_atomic_add_fetchl_be_mmu helper_atomic_add_fetchl_be_mmu_tricore +#define helper_atomic_cmpxchgq_le_mmu helper_atomic_cmpxchgq_le_mmu_tricore +#define helper_atomic_xchgq_le_mmu helper_atomic_xchgq_le_mmu_tricore +#define helper_atomic_fetch_addq_le_mmu helper_atomic_fetch_addq_le_mmu_tricore +#define helper_atomic_fetch_andq_le_mmu helper_atomic_fetch_andq_le_mmu_tricore +#define helper_atomic_fetch_orq_le_mmu helper_atomic_fetch_orq_le_mmu_tricore +#define helper_atomic_fetch_xorq_le_mmu helper_atomic_fetch_xorq_le_mmu_tricore +#define helper_atomic_add_fetchq_le_mmu helper_atomic_add_fetchq_le_mmu_tricore +#define helper_atomic_and_fetchq_le_mmu helper_atomic_and_fetchq_le_mmu_tricore +#define helper_atomic_or_fetchq_le_mmu helper_atomic_or_fetchq_le_mmu_tricore +#define helper_atomic_xor_fetchq_le_mmu helper_atomic_xor_fetchq_le_mmu_tricore +#define helper_atomic_fetch_sminq_le_mmu helper_atomic_fetch_sminq_le_mmu_tricore +#define helper_atomic_fetch_uminq_le_mmu 
helper_atomic_fetch_uminq_le_mmu_tricore +#define helper_atomic_fetch_smaxq_le_mmu helper_atomic_fetch_smaxq_le_mmu_tricore +#define helper_atomic_fetch_umaxq_le_mmu helper_atomic_fetch_umaxq_le_mmu_tricore +#define helper_atomic_smin_fetchq_le_mmu helper_atomic_smin_fetchq_le_mmu_tricore +#define helper_atomic_umin_fetchq_le_mmu helper_atomic_umin_fetchq_le_mmu_tricore +#define helper_atomic_smax_fetchq_le_mmu helper_atomic_smax_fetchq_le_mmu_tricore +#define helper_atomic_umax_fetchq_le_mmu helper_atomic_umax_fetchq_le_mmu_tricore +#define helper_atomic_cmpxchgq_be_mmu helper_atomic_cmpxchgq_be_mmu_tricore +#define helper_atomic_xchgq_be_mmu helper_atomic_xchgq_be_mmu_tricore +#define helper_atomic_fetch_andq_be_mmu helper_atomic_fetch_andq_be_mmu_tricore +#define helper_atomic_fetch_orq_be_mmu helper_atomic_fetch_orq_be_mmu_tricore +#define helper_atomic_fetch_xorq_be_mmu helper_atomic_fetch_xorq_be_mmu_tricore +#define helper_atomic_and_fetchq_be_mmu helper_atomic_and_fetchq_be_mmu_tricore +#define helper_atomic_or_fetchq_be_mmu helper_atomic_or_fetchq_be_mmu_tricore +#define helper_atomic_xor_fetchq_be_mmu helper_atomic_xor_fetchq_be_mmu_tricore +#define helper_atomic_fetch_sminq_be_mmu helper_atomic_fetch_sminq_be_mmu_tricore +#define helper_atomic_fetch_uminq_be_mmu helper_atomic_fetch_uminq_be_mmu_tricore +#define helper_atomic_fetch_smaxq_be_mmu helper_atomic_fetch_smaxq_be_mmu_tricore +#define helper_atomic_fetch_umaxq_be_mmu helper_atomic_fetch_umaxq_be_mmu_tricore +#define helper_atomic_smin_fetchq_be_mmu helper_atomic_smin_fetchq_be_mmu_tricore +#define helper_atomic_umin_fetchq_be_mmu helper_atomic_umin_fetchq_be_mmu_tricore +#define helper_atomic_smax_fetchq_be_mmu helper_atomic_smax_fetchq_be_mmu_tricore +#define helper_atomic_umax_fetchq_be_mmu helper_atomic_umax_fetchq_be_mmu_tricore +#define helper_atomic_fetch_addq_be_mmu helper_atomic_fetch_addq_be_mmu_tricore +#define helper_atomic_add_fetchq_be_mmu helper_atomic_add_fetchq_be_mmu_tricore 
+#define helper_atomic_cmpxchgb helper_atomic_cmpxchgb_tricore +#define helper_atomic_xchgb helper_atomic_xchgb_tricore +#define helper_atomic_fetch_addb helper_atomic_fetch_addb_tricore +#define helper_atomic_fetch_andb helper_atomic_fetch_andb_tricore +#define helper_atomic_fetch_orb helper_atomic_fetch_orb_tricore +#define helper_atomic_fetch_xorb helper_atomic_fetch_xorb_tricore +#define helper_atomic_add_fetchb helper_atomic_add_fetchb_tricore +#define helper_atomic_and_fetchb helper_atomic_and_fetchb_tricore +#define helper_atomic_or_fetchb helper_atomic_or_fetchb_tricore +#define helper_atomic_xor_fetchb helper_atomic_xor_fetchb_tricore +#define helper_atomic_fetch_sminb helper_atomic_fetch_sminb_tricore +#define helper_atomic_fetch_uminb helper_atomic_fetch_uminb_tricore +#define helper_atomic_fetch_smaxb helper_atomic_fetch_smaxb_tricore +#define helper_atomic_fetch_umaxb helper_atomic_fetch_umaxb_tricore +#define helper_atomic_smin_fetchb helper_atomic_smin_fetchb_tricore +#define helper_atomic_umin_fetchb helper_atomic_umin_fetchb_tricore +#define helper_atomic_smax_fetchb helper_atomic_smax_fetchb_tricore +#define helper_atomic_umax_fetchb helper_atomic_umax_fetchb_tricore +#define helper_atomic_cmpxchgw_le helper_atomic_cmpxchgw_le_tricore +#define helper_atomic_xchgw_le helper_atomic_xchgw_le_tricore +#define helper_atomic_fetch_addw_le helper_atomic_fetch_addw_le_tricore +#define helper_atomic_fetch_andw_le helper_atomic_fetch_andw_le_tricore +#define helper_atomic_fetch_orw_le helper_atomic_fetch_orw_le_tricore +#define helper_atomic_fetch_xorw_le helper_atomic_fetch_xorw_le_tricore +#define helper_atomic_add_fetchw_le helper_atomic_add_fetchw_le_tricore +#define helper_atomic_and_fetchw_le helper_atomic_and_fetchw_le_tricore +#define helper_atomic_or_fetchw_le helper_atomic_or_fetchw_le_tricore +#define helper_atomic_xor_fetchw_le helper_atomic_xor_fetchw_le_tricore +#define helper_atomic_fetch_sminw_le helper_atomic_fetch_sminw_le_tricore +#define 
helper_atomic_fetch_uminw_le helper_atomic_fetch_uminw_le_tricore +#define helper_atomic_fetch_smaxw_le helper_atomic_fetch_smaxw_le_tricore +#define helper_atomic_fetch_umaxw_le helper_atomic_fetch_umaxw_le_tricore +#define helper_atomic_smin_fetchw_le helper_atomic_smin_fetchw_le_tricore +#define helper_atomic_umin_fetchw_le helper_atomic_umin_fetchw_le_tricore +#define helper_atomic_smax_fetchw_le helper_atomic_smax_fetchw_le_tricore +#define helper_atomic_umax_fetchw_le helper_atomic_umax_fetchw_le_tricore +#define helper_atomic_cmpxchgw_be helper_atomic_cmpxchgw_be_tricore +#define helper_atomic_xchgw_be helper_atomic_xchgw_be_tricore +#define helper_atomic_fetch_andw_be helper_atomic_fetch_andw_be_tricore +#define helper_atomic_fetch_orw_be helper_atomic_fetch_orw_be_tricore +#define helper_atomic_fetch_xorw_be helper_atomic_fetch_xorw_be_tricore +#define helper_atomic_and_fetchw_be helper_atomic_and_fetchw_be_tricore +#define helper_atomic_or_fetchw_be helper_atomic_or_fetchw_be_tricore +#define helper_atomic_xor_fetchw_be helper_atomic_xor_fetchw_be_tricore +#define helper_atomic_fetch_sminw_be helper_atomic_fetch_sminw_be_tricore +#define helper_atomic_fetch_uminw_be helper_atomic_fetch_uminw_be_tricore +#define helper_atomic_fetch_smaxw_be helper_atomic_fetch_smaxw_be_tricore +#define helper_atomic_fetch_umaxw_be helper_atomic_fetch_umaxw_be_tricore +#define helper_atomic_smin_fetchw_be helper_atomic_smin_fetchw_be_tricore +#define helper_atomic_umin_fetchw_be helper_atomic_umin_fetchw_be_tricore +#define helper_atomic_smax_fetchw_be helper_atomic_smax_fetchw_be_tricore +#define helper_atomic_umax_fetchw_be helper_atomic_umax_fetchw_be_tricore +#define helper_atomic_fetch_addw_be helper_atomic_fetch_addw_be_tricore +#define helper_atomic_add_fetchw_be helper_atomic_add_fetchw_be_tricore +#define helper_atomic_cmpxchgl_le helper_atomic_cmpxchgl_le_tricore +#define helper_atomic_xchgl_le helper_atomic_xchgl_le_tricore +#define helper_atomic_fetch_addl_le 
helper_atomic_fetch_addl_le_tricore +#define helper_atomic_fetch_andl_le helper_atomic_fetch_andl_le_tricore +#define helper_atomic_fetch_orl_le helper_atomic_fetch_orl_le_tricore +#define helper_atomic_fetch_xorl_le helper_atomic_fetch_xorl_le_tricore +#define helper_atomic_add_fetchl_le helper_atomic_add_fetchl_le_tricore +#define helper_atomic_and_fetchl_le helper_atomic_and_fetchl_le_tricore +#define helper_atomic_or_fetchl_le helper_atomic_or_fetchl_le_tricore +#define helper_atomic_xor_fetchl_le helper_atomic_xor_fetchl_le_tricore +#define helper_atomic_fetch_sminl_le helper_atomic_fetch_sminl_le_tricore +#define helper_atomic_fetch_uminl_le helper_atomic_fetch_uminl_le_tricore +#define helper_atomic_fetch_smaxl_le helper_atomic_fetch_smaxl_le_tricore +#define helper_atomic_fetch_umaxl_le helper_atomic_fetch_umaxl_le_tricore +#define helper_atomic_smin_fetchl_le helper_atomic_smin_fetchl_le_tricore +#define helper_atomic_umin_fetchl_le helper_atomic_umin_fetchl_le_tricore +#define helper_atomic_smax_fetchl_le helper_atomic_smax_fetchl_le_tricore +#define helper_atomic_umax_fetchl_le helper_atomic_umax_fetchl_le_tricore +#define helper_atomic_cmpxchgl_be helper_atomic_cmpxchgl_be_tricore +#define helper_atomic_xchgl_be helper_atomic_xchgl_be_tricore +#define helper_atomic_fetch_andl_be helper_atomic_fetch_andl_be_tricore +#define helper_atomic_fetch_orl_be helper_atomic_fetch_orl_be_tricore +#define helper_atomic_fetch_xorl_be helper_atomic_fetch_xorl_be_tricore +#define helper_atomic_and_fetchl_be helper_atomic_and_fetchl_be_tricore +#define helper_atomic_or_fetchl_be helper_atomic_or_fetchl_be_tricore +#define helper_atomic_xor_fetchl_be helper_atomic_xor_fetchl_be_tricore +#define helper_atomic_fetch_sminl_be helper_atomic_fetch_sminl_be_tricore +#define helper_atomic_fetch_uminl_be helper_atomic_fetch_uminl_be_tricore +#define helper_atomic_fetch_smaxl_be helper_atomic_fetch_smaxl_be_tricore +#define helper_atomic_fetch_umaxl_be 
helper_atomic_fetch_umaxl_be_tricore +#define helper_atomic_smin_fetchl_be helper_atomic_smin_fetchl_be_tricore +#define helper_atomic_umin_fetchl_be helper_atomic_umin_fetchl_be_tricore +#define helper_atomic_smax_fetchl_be helper_atomic_smax_fetchl_be_tricore +#define helper_atomic_umax_fetchl_be helper_atomic_umax_fetchl_be_tricore +#define helper_atomic_fetch_addl_be helper_atomic_fetch_addl_be_tricore +#define helper_atomic_add_fetchl_be helper_atomic_add_fetchl_be_tricore +#define helper_atomic_cmpxchgq_le helper_atomic_cmpxchgq_le_tricore +#define helper_atomic_xchgq_le helper_atomic_xchgq_le_tricore +#define helper_atomic_fetch_addq_le helper_atomic_fetch_addq_le_tricore +#define helper_atomic_fetch_andq_le helper_atomic_fetch_andq_le_tricore +#define helper_atomic_fetch_orq_le helper_atomic_fetch_orq_le_tricore +#define helper_atomic_fetch_xorq_le helper_atomic_fetch_xorq_le_tricore +#define helper_atomic_add_fetchq_le helper_atomic_add_fetchq_le_tricore +#define helper_atomic_and_fetchq_le helper_atomic_and_fetchq_le_tricore +#define helper_atomic_or_fetchq_le helper_atomic_or_fetchq_le_tricore +#define helper_atomic_xor_fetchq_le helper_atomic_xor_fetchq_le_tricore +#define helper_atomic_fetch_sminq_le helper_atomic_fetch_sminq_le_tricore +#define helper_atomic_fetch_uminq_le helper_atomic_fetch_uminq_le_tricore +#define helper_atomic_fetch_smaxq_le helper_atomic_fetch_smaxq_le_tricore +#define helper_atomic_fetch_umaxq_le helper_atomic_fetch_umaxq_le_tricore +#define helper_atomic_smin_fetchq_le helper_atomic_smin_fetchq_le_tricore +#define helper_atomic_umin_fetchq_le helper_atomic_umin_fetchq_le_tricore +#define helper_atomic_smax_fetchq_le helper_atomic_smax_fetchq_le_tricore +#define helper_atomic_umax_fetchq_le helper_atomic_umax_fetchq_le_tricore +#define helper_atomic_cmpxchgq_be helper_atomic_cmpxchgq_be_tricore +#define helper_atomic_xchgq_be helper_atomic_xchgq_be_tricore +#define helper_atomic_fetch_andq_be helper_atomic_fetch_andq_be_tricore 
+#define helper_atomic_fetch_orq_be helper_atomic_fetch_orq_be_tricore +#define helper_atomic_fetch_xorq_be helper_atomic_fetch_xorq_be_tricore +#define helper_atomic_and_fetchq_be helper_atomic_and_fetchq_be_tricore +#define helper_atomic_or_fetchq_be helper_atomic_or_fetchq_be_tricore +#define helper_atomic_xor_fetchq_be helper_atomic_xor_fetchq_be_tricore +#define helper_atomic_fetch_sminq_be helper_atomic_fetch_sminq_be_tricore +#define helper_atomic_fetch_uminq_be helper_atomic_fetch_uminq_be_tricore +#define helper_atomic_fetch_smaxq_be helper_atomic_fetch_smaxq_be_tricore +#define helper_atomic_fetch_umaxq_be helper_atomic_fetch_umaxq_be_tricore +#define helper_atomic_smin_fetchq_be helper_atomic_smin_fetchq_be_tricore +#define helper_atomic_umin_fetchq_be helper_atomic_umin_fetchq_be_tricore +#define helper_atomic_smax_fetchq_be helper_atomic_smax_fetchq_be_tricore +#define helper_atomic_umax_fetchq_be helper_atomic_umax_fetchq_be_tricore +#define helper_atomic_fetch_addq_be helper_atomic_fetch_addq_be_tricore +#define helper_atomic_add_fetchq_be helper_atomic_add_fetchq_be_tricore +#define cpu_ldub_code cpu_ldub_code_tricore +#define cpu_lduw_code cpu_lduw_code_tricore +#define cpu_ldl_code cpu_ldl_code_tricore +#define cpu_ldq_code cpu_ldq_code_tricore +#define helper_div_i32 helper_div_i32_tricore +#define helper_rem_i32 helper_rem_i32_tricore +#define helper_divu_i32 helper_divu_i32_tricore +#define helper_remu_i32 helper_remu_i32_tricore +#define helper_shl_i64 helper_shl_i64_tricore +#define helper_shr_i64 helper_shr_i64_tricore +#define helper_sar_i64 helper_sar_i64_tricore +#define helper_div_i64 helper_div_i64_tricore +#define helper_rem_i64 helper_rem_i64_tricore +#define helper_divu_i64 helper_divu_i64_tricore +#define helper_remu_i64 helper_remu_i64_tricore +#define helper_muluh_i64 helper_muluh_i64_tricore +#define helper_mulsh_i64 helper_mulsh_i64_tricore +#define helper_clz_i32 helper_clz_i32_tricore +#define helper_ctz_i32 
helper_ctz_i32_tricore +#define helper_clz_i64 helper_clz_i64_tricore +#define helper_ctz_i64 helper_ctz_i64_tricore +#define helper_clrsb_i32 helper_clrsb_i32_tricore +#define helper_clrsb_i64 helper_clrsb_i64_tricore +#define helper_ctpop_i32 helper_ctpop_i32_tricore +#define helper_ctpop_i64 helper_ctpop_i64_tricore +#define helper_lookup_tb_ptr helper_lookup_tb_ptr_tricore +#define helper_exit_atomic helper_exit_atomic_tricore +#define helper_gvec_add8 helper_gvec_add8_tricore +#define helper_gvec_add16 helper_gvec_add16_tricore +#define helper_gvec_add32 helper_gvec_add32_tricore +#define helper_gvec_add64 helper_gvec_add64_tricore +#define helper_gvec_adds8 helper_gvec_adds8_tricore +#define helper_gvec_adds16 helper_gvec_adds16_tricore +#define helper_gvec_adds32 helper_gvec_adds32_tricore +#define helper_gvec_adds64 helper_gvec_adds64_tricore +#define helper_gvec_sub8 helper_gvec_sub8_tricore +#define helper_gvec_sub16 helper_gvec_sub16_tricore +#define helper_gvec_sub32 helper_gvec_sub32_tricore +#define helper_gvec_sub64 helper_gvec_sub64_tricore +#define helper_gvec_subs8 helper_gvec_subs8_tricore +#define helper_gvec_subs16 helper_gvec_subs16_tricore +#define helper_gvec_subs32 helper_gvec_subs32_tricore +#define helper_gvec_subs64 helper_gvec_subs64_tricore +#define helper_gvec_mul8 helper_gvec_mul8_tricore +#define helper_gvec_mul16 helper_gvec_mul16_tricore +#define helper_gvec_mul32 helper_gvec_mul32_tricore +#define helper_gvec_mul64 helper_gvec_mul64_tricore +#define helper_gvec_muls8 helper_gvec_muls8_tricore +#define helper_gvec_muls16 helper_gvec_muls16_tricore +#define helper_gvec_muls32 helper_gvec_muls32_tricore +#define helper_gvec_muls64 helper_gvec_muls64_tricore +#define helper_gvec_neg8 helper_gvec_neg8_tricore +#define helper_gvec_neg16 helper_gvec_neg16_tricore +#define helper_gvec_neg32 helper_gvec_neg32_tricore +#define helper_gvec_neg64 helper_gvec_neg64_tricore +#define helper_gvec_abs8 helper_gvec_abs8_tricore +#define 
helper_gvec_abs16 helper_gvec_abs16_tricore +#define helper_gvec_abs32 helper_gvec_abs32_tricore +#define helper_gvec_abs64 helper_gvec_abs64_tricore +#define helper_gvec_mov helper_gvec_mov_tricore +#define helper_gvec_dup64 helper_gvec_dup64_tricore +#define helper_gvec_dup32 helper_gvec_dup32_tricore +#define helper_gvec_dup16 helper_gvec_dup16_tricore +#define helper_gvec_dup8 helper_gvec_dup8_tricore +#define helper_gvec_not helper_gvec_not_tricore +#define helper_gvec_and helper_gvec_and_tricore +#define helper_gvec_or helper_gvec_or_tricore +#define helper_gvec_xor helper_gvec_xor_tricore +#define helper_gvec_andc helper_gvec_andc_tricore +#define helper_gvec_orc helper_gvec_orc_tricore +#define helper_gvec_nand helper_gvec_nand_tricore +#define helper_gvec_nor helper_gvec_nor_tricore +#define helper_gvec_eqv helper_gvec_eqv_tricore +#define helper_gvec_ands helper_gvec_ands_tricore +#define helper_gvec_xors helper_gvec_xors_tricore +#define helper_gvec_ors helper_gvec_ors_tricore +#define helper_gvec_shl8i helper_gvec_shl8i_tricore +#define helper_gvec_shl16i helper_gvec_shl16i_tricore +#define helper_gvec_shl32i helper_gvec_shl32i_tricore +#define helper_gvec_shl64i helper_gvec_shl64i_tricore +#define helper_gvec_shr8i helper_gvec_shr8i_tricore +#define helper_gvec_shr16i helper_gvec_shr16i_tricore +#define helper_gvec_shr32i helper_gvec_shr32i_tricore +#define helper_gvec_shr64i helper_gvec_shr64i_tricore +#define helper_gvec_sar8i helper_gvec_sar8i_tricore +#define helper_gvec_sar16i helper_gvec_sar16i_tricore +#define helper_gvec_sar32i helper_gvec_sar32i_tricore +#define helper_gvec_sar64i helper_gvec_sar64i_tricore +#define helper_gvec_shl8v helper_gvec_shl8v_tricore +#define helper_gvec_shl16v helper_gvec_shl16v_tricore +#define helper_gvec_shl32v helper_gvec_shl32v_tricore +#define helper_gvec_shl64v helper_gvec_shl64v_tricore +#define helper_gvec_shr8v helper_gvec_shr8v_tricore +#define helper_gvec_shr16v helper_gvec_shr16v_tricore +#define 
helper_gvec_shr32v helper_gvec_shr32v_tricore +#define helper_gvec_shr64v helper_gvec_shr64v_tricore +#define helper_gvec_sar8v helper_gvec_sar8v_tricore +#define helper_gvec_sar16v helper_gvec_sar16v_tricore +#define helper_gvec_sar32v helper_gvec_sar32v_tricore +#define helper_gvec_sar64v helper_gvec_sar64v_tricore +#define helper_gvec_eq8 helper_gvec_eq8_tricore +#define helper_gvec_ne8 helper_gvec_ne8_tricore +#define helper_gvec_lt8 helper_gvec_lt8_tricore +#define helper_gvec_le8 helper_gvec_le8_tricore +#define helper_gvec_ltu8 helper_gvec_ltu8_tricore +#define helper_gvec_leu8 helper_gvec_leu8_tricore +#define helper_gvec_eq16 helper_gvec_eq16_tricore +#define helper_gvec_ne16 helper_gvec_ne16_tricore +#define helper_gvec_lt16 helper_gvec_lt16_tricore +#define helper_gvec_le16 helper_gvec_le16_tricore +#define helper_gvec_ltu16 helper_gvec_ltu16_tricore +#define helper_gvec_leu16 helper_gvec_leu16_tricore +#define helper_gvec_eq32 helper_gvec_eq32_tricore +#define helper_gvec_ne32 helper_gvec_ne32_tricore +#define helper_gvec_lt32 helper_gvec_lt32_tricore +#define helper_gvec_le32 helper_gvec_le32_tricore +#define helper_gvec_ltu32 helper_gvec_ltu32_tricore +#define helper_gvec_leu32 helper_gvec_leu32_tricore +#define helper_gvec_eq64 helper_gvec_eq64_tricore +#define helper_gvec_ne64 helper_gvec_ne64_tricore +#define helper_gvec_lt64 helper_gvec_lt64_tricore +#define helper_gvec_le64 helper_gvec_le64_tricore +#define helper_gvec_ltu64 helper_gvec_ltu64_tricore +#define helper_gvec_leu64 helper_gvec_leu64_tricore +#define helper_gvec_ssadd8 helper_gvec_ssadd8_tricore +#define helper_gvec_ssadd16 helper_gvec_ssadd16_tricore +#define helper_gvec_ssadd32 helper_gvec_ssadd32_tricore +#define helper_gvec_ssadd64 helper_gvec_ssadd64_tricore +#define helper_gvec_sssub8 helper_gvec_sssub8_tricore +#define helper_gvec_sssub16 helper_gvec_sssub16_tricore +#define helper_gvec_sssub32 helper_gvec_sssub32_tricore +#define helper_gvec_sssub64 helper_gvec_sssub64_tricore 
+#define helper_gvec_usadd8 helper_gvec_usadd8_tricore +#define helper_gvec_usadd16 helper_gvec_usadd16_tricore +#define helper_gvec_usadd32 helper_gvec_usadd32_tricore +#define helper_gvec_usadd64 helper_gvec_usadd64_tricore +#define helper_gvec_ussub8 helper_gvec_ussub8_tricore +#define helper_gvec_ussub16 helper_gvec_ussub16_tricore +#define helper_gvec_ussub32 helper_gvec_ussub32_tricore +#define helper_gvec_ussub64 helper_gvec_ussub64_tricore +#define helper_gvec_smin8 helper_gvec_smin8_tricore +#define helper_gvec_smin16 helper_gvec_smin16_tricore +#define helper_gvec_smin32 helper_gvec_smin32_tricore +#define helper_gvec_smin64 helper_gvec_smin64_tricore +#define helper_gvec_smax8 helper_gvec_smax8_tricore +#define helper_gvec_smax16 helper_gvec_smax16_tricore +#define helper_gvec_smax32 helper_gvec_smax32_tricore +#define helper_gvec_smax64 helper_gvec_smax64_tricore +#define helper_gvec_umin8 helper_gvec_umin8_tricore +#define helper_gvec_umin16 helper_gvec_umin16_tricore +#define helper_gvec_umin32 helper_gvec_umin32_tricore +#define helper_gvec_umin64 helper_gvec_umin64_tricore +#define helper_gvec_umax8 helper_gvec_umax8_tricore +#define helper_gvec_umax16 helper_gvec_umax16_tricore +#define helper_gvec_umax32 helper_gvec_umax32_tricore +#define helper_gvec_umax64 helper_gvec_umax64_tricore +#define helper_gvec_bitsel helper_gvec_bitsel_tricore +#define cpu_restore_state cpu_restore_state_tricore +#define page_collection_lock page_collection_lock_tricore +#define page_collection_unlock page_collection_unlock_tricore +#define free_code_gen_buffer free_code_gen_buffer_tricore +#define tcg_exec_init tcg_exec_init_tricore +#define tb_cleanup tb_cleanup_tricore +#define tb_flush tb_flush_tricore +#define tb_phys_invalidate tb_phys_invalidate_tricore +#define tb_gen_code tb_gen_code_tricore +#define tb_exec_lock tb_exec_lock_tricore +#define tb_exec_unlock tb_exec_unlock_tricore +#define tb_invalidate_phys_page_range tb_invalidate_phys_page_range_tricore 
+#define tb_invalidate_phys_range tb_invalidate_phys_range_tricore +#define tb_invalidate_phys_page_fast tb_invalidate_phys_page_fast_tricore +#define tb_check_watchpoint tb_check_watchpoint_tricore +#define cpu_io_recompile cpu_io_recompile_tricore +#define tb_flush_jmp_cache tb_flush_jmp_cache_tricore +#define tcg_flush_softmmu_tlb tcg_flush_softmmu_tlb_tricore +#define translator_loop_temp_check translator_loop_temp_check_tricore +#define translator_loop translator_loop_tricore +#define helper_atomic_cmpxchgo_le_mmu helper_atomic_cmpxchgo_le_mmu_tricore +#define helper_atomic_cmpxchgo_be_mmu helper_atomic_cmpxchgo_be_mmu_tricore +#define helper_atomic_ldo_le_mmu helper_atomic_ldo_le_mmu_tricore +#define helper_atomic_ldo_be_mmu helper_atomic_ldo_be_mmu_tricore +#define helper_atomic_sto_le_mmu helper_atomic_sto_le_mmu_tricore +#define helper_atomic_sto_be_mmu helper_atomic_sto_be_mmu_tricore +#define unassigned_mem_ops unassigned_mem_ops_tricore +#define floatx80_infinity floatx80_infinity_tricore +#define dup_const_func dup_const_func_tricore +#define gen_helper_raise_exception gen_helper_raise_exception_tricore +#define gen_helper_raise_interrupt gen_helper_raise_interrupt_tricore +#define gen_helper_vfp_get_fpscr gen_helper_vfp_get_fpscr_tricore +#define gen_helper_vfp_set_fpscr gen_helper_vfp_set_fpscr_tricore +#define gen_helper_cpsr_read gen_helper_cpsr_read_tricore +#define gen_helper_cpsr_write gen_helper_cpsr_write_tricore +#define helper_fadd helper_fadd_tricore +#define helper_fsub helper_fsub_tricore +#define helper_fmul helper_fmul_tricore +#define helper_fdiv helper_fdiv_tricore +#define helper_fmadd helper_fmadd_tricore +#define helper_fmsub helper_fmsub_tricore +#define helper_pack helper_pack_tricore +#define gen_intermediate_code gen_intermediate_code_tricore +#define restore_state_to_opc restore_state_to_opc_tricore +#endif diff --git a/samples/Makefile b/samples/Makefile index 5b4b28de..007e43ba 100644 --- a/samples/Makefile +++ 
b/samples/Makefile @@ -89,6 +89,9 @@ endif ifneq (,$(findstring m68k,$(UNICORN_ARCHS))) SOURCES += sample_m68k.c endif +ifneq (,$(findstring tricore,$(UNICORN_ARCHS))) +SOURCES += sample_tricore.c +endif BINS = $(SOURCES:.c=$(BIN_EXT)) OBJS = $(SOURCES:.c=.o) diff --git a/samples/sample_tricore.c b/samples/sample_tricore.c new file mode 100644 index 00000000..2983b2e5 --- /dev/null +++ b/samples/sample_tricore.c @@ -0,0 +1,100 @@ +/* + Created for Unicorn Engine by Eric Poole , 2022 + Copyright 2022 Aptiv +*/ + +/* Sample code to demonstrate how to emulate TriCore code */ + +#include +#include + +// code to be emulated +#define CODE "\x82\x11\xbb\x00\x00\x08" // mov d0, #0x1; mov.u d0, #0x8000 + +// memory address where emulation starts +#define ADDRESS 0x10000 + +static void hook_block(uc_engine *uc, uint64_t address, uint32_t size, + void *user_data) +{ + printf(">>> Tracing basic block at 0x%" PRIx64 ", block size = 0x%x\n", + address, size); +} + +static void hook_code(uc_engine *uc, uint64_t address, uint32_t size, + void *user_data) +{ + printf(">>> Tracing instruction at 0x%" PRIx64 + ", instruction size = 0x%x\n", + address, size); +} + +static void test_tricore(void) +{ + uc_engine *uc; + uc_err err; + uc_hook trace1, trace2; + + uint32_t d0 = 0x0; // d0 register + + printf("Emulate TriCore code\n"); + + // Initialize emulator in TriCore mode + err = uc_open(UC_ARCH_TRICORE, UC_MODE_LITTLE_ENDIAN, &uc); + if (err) { + printf("Failed on uc_open() with error returned: %u (%s)\n", err, + uc_strerror(err)); + return; + } + + // map 2MB memory for this emulation + uc_mem_map(uc, ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL); + + // write machine code to be emulated to memory + uc_mem_write(uc, ADDRESS, CODE, sizeof(CODE) - 1); + + // tracing all basic blocks with customized callback + uc_hook_add(uc, &trace1, UC_HOOK_BLOCK, hook_block, NULL, 1, 0); + + // tracing one instruction at ADDRESS with customized callback + uc_hook_add(uc, &trace2, UC_HOOK_CODE, hook_code, 
NULL, ADDRESS, + ADDRESS + sizeof(CODE) - 1); + + // emulate machine code in infinite time (last param = 0), or when + // finishing all the code. + err = uc_emu_start(uc, ADDRESS, ADDRESS + sizeof(CODE) - 1, 0, 0); + if (err) { + printf("Failed on uc_emu_start() with error returned: %u\n", err); + } + + // now print out some registers + printf(">>> Emulation done. Below is the CPU context\n"); + + uc_reg_read(uc, UC_TRICORE_REG_D0, &d0); + printf(">>> d0 = 0x%x\n", d0); + + uc_close(uc); +} + +int main(int argc, char **argv, char **envp) +{ + // dynamically load shared library +#ifdef DYNLOAD + if (!uc_dyn_load(NULL, 0)) { + printf("Error dynamically loading shared library.\n"); + printf("Please check that unicorn.dll/unicorn.so is available as well " + "as\n"); + printf("any other dependent dll/so files.\n"); + printf("The easiest way is to place them in the same directory as this " + "app.\n"); + return 1; + } +#endif + + test_tricore(); +#ifdef DYNLOAD + uc_dyn_free(); +#endif + + return 0; +} \ No newline at end of file diff --git a/symbols.sh b/symbols.sh index 497183cc..2968b959 100755 --- a/symbols.sh +++ b/symbols.sh @@ -6274,7 +6274,19 @@ tcg_s390_program_interrupt \ tcg_s390_data_exception \ " -ARCHS="x86_64 arm aarch64 riscv32 riscv64 mips mipsel mips64 mips64el sparc sparc64 m68k ppc ppc64 s390x" +tricore_SYMBOLS=" +helper_fadd \ +helper_fsub \ +helper_fmul \ +helper_fdiv \ +helper_fmadd \ +helper_fmsub \ +helper_pack \ +gen_intermediate_code \ +restore_state_to_opc \ +" + +ARCHS="x86_64 arm aarch64 riscv32 riscv64 mips mipsel mips64 mips64el sparc sparc64 m68k ppc ppc64 s390x tricore" for arch in $ARCHS; do diff --git a/tests/unit/test_tricore.c b/tests/unit/test_tricore.c new file mode 100644 index 00000000..f699ff9f --- /dev/null +++ b/tests/unit/test_tricore.c @@ -0,0 +1,6 @@ +#include "unicorn_test.h" + +const uint64_t code_start = 0x1000; +const uint64_t code_len = 0x4000; + +TEST_LIST = {{NULL, NULL}}; diff --git a/uc.c b/uc.c index 
8a58c10d..1abe4400 100644 --- a/uc.c +++ b/uc.c @@ -24,6 +24,7 @@ #include "qemu/target/ppc/unicorn.h" #include "qemu/target/riscv/unicorn.h" #include "qemu/target/s390x/unicorn.h" +#include "qemu/target/tricore/unicorn.h" #include "qemu/include/qemu/queue.h" #include "qemu-common.h" @@ -167,6 +168,10 @@ bool uc_arch_supported(uc_arch arch) #ifdef UNICORN_HAS_S390X case UC_ARCH_S390X: return true; +#endif +#ifdef UNICORN_HAS_TRICORE + case UC_ARCH_TRICORE: + return true; #endif /* Invalid or disabled arch */ default: @@ -384,6 +389,15 @@ uc_err uc_open(uc_arch arch, uc_mode mode, uc_engine **result) } uc->init_arch = s390_uc_init; break; +#endif +#ifdef UNICORN_HAS_TRICORE + case UC_ARCH_TRICORE: + if ((mode & ~UC_MODE_TRICORE_MASK)) { + free(uc); + return UC_ERR_MODE; + } + uc->init_arch = tricore_uc_init; + break; #endif } @@ -798,6 +812,11 @@ uc_err uc_emu_start(uc_engine *uc, uint64_t begin, uint64_t until, case UC_ARCH_S390X: uc_reg_write(uc, UC_S390X_REG_PC, &begin); break; +#endif +#ifdef UNICORN_HAS_TRICORE + case UC_ARCH_TRICORE: + uc_reg_write(uc, UC_TRICORE_REG_PC, &begin); + break; #endif } @@ -1960,6 +1979,12 @@ static void find_context_reg_rw_function(uc_arch arch, uc_mode mode, rw->context_reg_read = s390_context_reg_read; rw->context_reg_write = s390_context_reg_write; break; +#endif +#ifdef UNICORN_HAS_TRICORE + case UC_ARCH_TRICORE: + rw->context_reg_read = tricore_context_reg_read; + rw->context_reg_write = tricore_context_reg_write; + break; #endif }